2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* NOTE(review): the embedded original line numbers in this excerpt are
 * non-contiguous (112, 113, 115, ...), so lines are missing throughout;
 * this text is an elided dump and will not compile as-is. */
/* Program identity plus file-scope state: forward declarations, frame
 * dup/drop counters, and the global input/output stream and file tables. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
115 static FILE *vstats_file;
117 const char *const forced_keyframes_const_names[] = {
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
142 static uint8_t *subtitle_out;
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank canvas in ist->sub2video.frame: size it from the decoder
 * dimensions (falling back to the configured sub2video w/h), set RGB32,
 * allocate a buffer, and zero the first plane.  NOTE(review): excerpt is
 * elided (embedded line numbers skip), so error-path lines are missing. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas, expanding each
 * paletted source byte through pal (r->data[1]).  Warns on non-bitmap or
 * out-of-bounds rectangles — presumably returning early, but those lines
 * are elided from this excerpt. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Stamp the sub2video canvas with pts (also recorded as last_pts) and feed
 * it to every buffersrc attached to this input stream, keeping a reference
 * so the same frame can be re-sent later. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a freshly blanked canvas and push it into the
 * filters; display start/end times are rescaled from the AV_TIME_BASE_Q
 * 1 ms units into the stream time base, and end_pts is remembered so the
 * heartbeat knows when the overlay expires.  The NULL-sub path (pts taken
 * from end_pts) is partially elided here — TODO confirm against full source. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Periodic keep-alive: when any packet is read from this input file, re-send
 * the current sub2video frame on sibling subtitle streams whose filters are
 * starving, so a waiting overlay filter does not stall the graph.  The
 * nb_reqs gate before the final push is elided in this excerpt. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream for a sub2video input: emit one last clearing update if an
 * overlay is still active, then send NULL (EOF) to every attached buffersrc. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal teardown: restore the saved tty attributes
 * (oldtty) on stdin.  The guard on restore_tty is elided in this excerpt. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared with the handlers below; transcode_init_done
 * is atomic because it is read from the interrupt callback. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: record the signal, count deliveries, and after more than
 * three signals write a message with write(2) (async-signal-safe, unlike
 * printf) before hard-exiting — the exit call itself is elided here. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
/* Windows console control handler: maps Ctrl-C/Break to SIGINT semantics and
 * close/logoff/shutdown to SIGTERM, then stalls until the main threads mark
 * ffmpeg_exited so cleanup can finish before Windows hard-terminates us. */
341 #if HAVE_SETCONSOLECTRLHANDLER
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of the terminal/signal initialisation routine (its signature is
 * not visible in this excerpt — presumably term_init; confirm against the
 * full source).  Puts stdin into a raw-ish mode for interactive key reading
 * and installs sigterm_handler for the usual termination signals, plus the
 * Windows console handler when available. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* Non-blocking single-key read: select() with a (elided) zero timeout on
 * POSIX, PeekNamedPipe/GetConsoleMode probing on Windows when stdin is a
 * pipe (e.g. running under a GUI front-end). */
407 /* read a key without blocking */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* Interrupt callback handed to libavformat via int_cb: abort blocking I/O
 * once more signals have arrived than the init-done flag accounts for
 * (i.e. one signal aborts during init, a signal after init also aborts). */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: drains and frees filtergraphs (including queued frames
 * and sub2video subtitle queues), output files/streams (bitstream filters,
 * muxing queues, encoder contexts), input files/streams and their decoder
 * state, then the vstats file and the global tables, and finally logs how
 * the run ended.  NOTE(review): excerpt is elided — several closing braces
 * and intermediate lines are missing per the embedded numbering. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
539 av_freep(&ost->bsf_extradata_updated);
541 av_frame_free(&ost->filtered_frame);
542 av_frame_free(&ost->last_frame);
543 av_dict_free(&ost->encoder_opts);
545 av_parser_close(ost->parser);
546 avcodec_free_context(&ost->parser_avctx);
548 av_freep(&ost->forced_keyframes);
549 av_expr_free(ost->forced_keyframes_pexpr);
550 av_freep(&ost->avfilter);
551 av_freep(&ost->logfile_prefix);
553 av_freep(&ost->audio_channels_map);
554 ost->audio_channels_mapped = 0;
556 av_dict_free(&ost->sws_dict);
558 avcodec_free_context(&ost->enc_ctx);
559 avcodec_parameters_free(&ost->ref_par);
561 if (ost->muxing_queue) {
562 while (av_fifo_size(ost->muxing_queue)) {
564 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
565 av_packet_unref(&pkt);
567 av_fifo_freep(&ost->muxing_queue);
570 av_freep(&output_streams[i]);
573 free_input_threads();
575 for (i = 0; i < nb_input_files; i++) {
576 avformat_close_input(&input_files[i]->ctx);
577 av_freep(&input_files[i]);
579 for (i = 0; i < nb_input_streams; i++) {
580 InputStream *ist = input_streams[i];
582 av_frame_free(&ist->decoded_frame);
583 av_frame_free(&ist->filter_frame);
584 av_dict_free(&ist->decoder_opts);
585 avsubtitle_free(&ist->prev_sub.subtitle);
586 av_frame_free(&ist->sub2video.frame);
587 av_freep(&ist->filters);
588 av_freep(&ist->hwaccel_device);
589 av_freep(&ist->dts_buffer);
591 avcodec_free_context(&ist->dec_ctx);
593 av_freep(&input_streams[i]);
597 if (fclose(vstats_file))
598 av_log(NULL, AV_LOG_ERROR,
599 "Error closing vstats file, loss of information possible: %s\n",
600 av_err2str(AVERROR(errno)));
602 av_freep(&vstats_filename);
604 av_freep(&input_streams);
605 av_freep(&input_files);
606 av_freep(&output_streams);
607 av_freep(&output_files);
611 avformat_network_deinit();
613 if (received_sigterm) {
614 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
615 (int) received_sigterm);
616 } else if (ret && atomic_load(&transcode_init_done)) {
617 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that appears in b (case-sensitive match);
 * used to drop already-consumed options from a pending dictionary. */
623 void remove_avoptions(AVDictionary **a, AVDictionary *b)
625 AVDictionaryEntry *t = NULL;
627 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal-log if any option is left unconsumed in m (i.e. the user supplied
 * an option no component recognised).  The exit call is elided here. */
632 void assert_avoptions(AVDictionary *m)
634 AVDictionaryEntry *t;
635 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Only the signature is visible in this excerpt; body elided. */
641 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is active, log the CPU time elapsed since the last
 * call together with a printf-style label; va_start/va_end and the
 * current_time update are elided in this excerpt. */
646 static void update_benchmark(const char *fmt, ...)
648 if (do_benchmark_all) {
649 int64_t t = getutime();
655 vsnprintf(buf, sizeof(buf), fmt, va);
657 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* OR the given finished flags into every output stream: this_stream for
 * ost itself, others for all the rest. */
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
666 for (i = 0; i < nb_output_streams; i++) {
667 OutputStream *ost2 = output_streams[i];
668 ost2->finished |= ost == ost2 ? this_stream : others;
/* Mux one packet: enforce -frames limits for non-encoded-video streams,
 * buffer packets in ost->muxing_queue (growing it up to
 * max_muxing_queue_size) while the muxer header is not yet written, rescale
 * timestamps into the stream time base, repair DTS>PTS and non-monotonic
 * DTS (fatal under exit-on-error, which is elided here), update byte/packet
 * stats, and hand the packet to av_interleaved_write_frame.  On write
 * failure, mark all output streams finished. */
672 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
674 AVFormatContext *s = of->ctx;
675 AVStream *st = ost->st;
679 * Audio encoders may split the packets -- #frames in != #packets out.
680 * But there is no reordering, so we can limit the number of output packets
681 * by simply dropping them here.
682 * Counting encoded video frames needs to be done separately because of
683 * reordering, see do_video_out().
684 * Do not count the packet when unqueued because it has been counted when queued.
686 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
687 if (ost->frame_number >= ost->max_frames) {
688 av_packet_unref(pkt);
694 if (!of->header_written) {
695 AVPacket tmp_pkt = {0};
696 /* the muxer is not initialized yet, buffer the packet */
697 if (!av_fifo_space(ost->muxing_queue)) {
698 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
699 ost->max_muxing_queue_size);
700 if (new_size <= av_fifo_size(ost->muxing_queue)) {
701 av_log(NULL, AV_LOG_ERROR,
702 "Too many packets buffered for output stream %d:%d.\n",
703 ost->file_index, ost->st->index);
706 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
710 ret = av_packet_ref(&tmp_pkt, pkt);
713 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
714 av_packet_unref(pkt);
718 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
719 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
720 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
722 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
724 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
726 ost->quality = sd ? AV_RL32(sd) : -1;
727 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
729 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
731 ost->error[i] = AV_RL64(sd + 8 + 8*i);
736 if (ost->frame_rate.num && ost->is_cfr) {
737 if (pkt->duration > 0)
738 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
739 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
744 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
746 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
747 if (pkt->dts != AV_NOPTS_VALUE &&
748 pkt->pts != AV_NOPTS_VALUE &&
749 pkt->dts > pkt->pts) {
750 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
752 ost->file_index, ost->st->index);
754 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
755 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
756 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
758 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
759 pkt->dts != AV_NOPTS_VALUE &&
760 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
761 ost->last_mux_dts != AV_NOPTS_VALUE) {
762 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
763 if (pkt->dts < max) {
764 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
765 av_log(s, loglevel, "Non-monotonous DTS in output stream "
766 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
767 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
769 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
772 av_log(s, loglevel, "changing to %"PRId64". This may result "
773 "in incorrect timestamps in the output file.\n",
775 if (pkt->pts >= pkt->dts)
776 pkt->pts = FFMAX(pkt->pts, max);
781 ost->last_mux_dts = pkt->dts;
783 ost->data_size += pkt->size;
784 ost->packets_written++;
786 pkt->stream_index = ost->index;
789 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
790 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
791 av_get_media_type_string(ost->enc_ctx->codec_type),
792 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
793 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
798 ret = av_interleaved_write_frame(s, pkt);
800 print_error("av_interleaved_write_frame()", ret);
801 main_return_code = 1;
802 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
804 av_packet_unref(pkt);
/* Mark the stream's encoder finished and clamp the file's recording_time to
 * the stream's current end, so other streams stop at the same point. */
807 static void close_output_stream(OutputStream *ost)
809 OutputFile *of = output_files[ost->file_index];
811 ost->finished |= ENCODER_FINISHED;
813 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
814 of->recording_time = FFMIN(of->recording_time, end);
/* Push a packet through the stream's bitstream-filter chain (if any) and
 * then to write_packet().  Includes the documented aac_adtstoasc extradata
 * propagation hack; the drain loop structure around av_bsf_receive_packet
 * is partially elided in this excerpt. */
818 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
822 /* apply the output bitstream filters, if any */
823 if (ost->nb_bitstream_filters) {
826 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
832 /* get a packet from the previous filter up the chain */
833 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
834 if (ret == AVERROR(EAGAIN)) {
840 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
841 * the api states this shouldn't happen after init(). Propagate it here to the
842 * muxer and to the next filters in the chain to workaround this.
843 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
844 * par_out->extradata and adapt muxers accordingly to get rid of this. */
845 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
846 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
849 ost->bsf_extradata_updated[idx - 1] |= 1;
852 /* send it to the next filter down the chain or to the muxer */
853 if (idx < ost->nb_bitstream_filters) {
854 /* HACK/FIXME! - See above */
855 if (!(ost->bsf_extradata_updated[idx] & 2)) {
856 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
859 ost->bsf_extradata_updated[idx] |= 2;
861 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
866 write_packet(of, pkt, ost, 0);
869 write_packet(of, pkt, ost, 0);
872 if (ret < 0 && ret != AVERROR_EOF) {
873 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
874 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether the stream is still within -t: if the stream position
 * (sync_opts relative to first_pts) has reached the file's recording_time,
 * close the stream.  The return statements are elided in this excerpt. */
880 static int check_recording_time(OutputStream *ost)
882 OutputFile *of = output_files[ost->file_index];
884 if (of->recording_time != INT64_MAX &&
885 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
886 AV_TIME_BASE_Q) >= 0) {
887 close_output_stream(ost);
/* Encode one audio frame: honor -t via check_recording_time, default the
 * frame pts from sync_opts when absent, advance sync_opts by nb_samples,
 * then run the avcodec send_frame/receive_packet loop, rescaling each
 * packet into the mux timebase before output_packet().  Error-path lines
 * between send and receive are elided in this excerpt. */
893 static void do_audio_out(OutputFile *of, OutputStream *ost,
896 AVCodecContext *enc = ost->enc_ctx;
900 av_init_packet(&pkt);
904 if (!check_recording_time(ost))
907 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
908 frame->pts = ost->sync_opts;
909 ost->sync_opts = frame->pts + frame->nb_samples;
910 ost->samples_encoded += frame->nb_samples;
911 ost->frames_encoded++;
913 av_assert0(pkt.size || !pkt.data);
914 update_benchmark(NULL);
916 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
917 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
918 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
919 enc->time_base.num, enc->time_base.den);
922 ret = avcodec_send_frame(enc, frame);
927 ret = avcodec_receive_packet(enc, &pkt);
928 if (ret == AVERROR(EAGAIN))
933 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
935 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
938 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
939 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
940 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
941 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
944 output_packet(of, &pkt, ost);
949 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle: requires a valid pts, lazily allocates the shared
 * subtitle_out buffer, normalizes start/end display times so start is 0,
 * and encodes into packets — twice for DVB subtitles (one packet draws,
 * one clears), with a pts/duration adjustment distinguishing the two.
 * Loop-control and exit lines are elided in this excerpt. */
953 static void do_subtitle_out(OutputFile *of,
957 int subtitle_out_max_size = 1024 * 1024;
958 int subtitle_out_size, nb, i;
963 if (sub->pts == AV_NOPTS_VALUE) {
964 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
973 subtitle_out = av_malloc(subtitle_out_max_size);
975 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
980 /* Note: DVB subtitle need one packet to draw them and one other
981 packet to clear them */
982 /* XXX: signal it in the codec context ? */
983 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
988 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
990 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
991 pts -= output_files[ost->file_index]->start_time;
992 for (i = 0; i < nb; i++) {
993 unsigned save_num_rects = sub->num_rects;
995 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
996 if (!check_recording_time(ost))
1000 // start_display_time is required to be 0
1001 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1002 sub->end_display_time -= sub->start_display_time;
1003 sub->start_display_time = 0;
1007 ost->frames_encoded++;
1009 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1010 subtitle_out_max_size, sub);
1012 sub->num_rects = save_num_rects;
1013 if (subtitle_out_size < 0) {
1014 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1018 av_init_packet(&pkt);
1019 pkt.data = subtitle_out;
1020 pkt.size = subtitle_out_size;
1021 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1022 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1023 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1024 /* XXX: the pts correction is handled here. Maybe handling
1025 it in the codec would be better */
1027 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1032 output_packet(of, &pkt, ost);
/* Video output with frame-rate conversion: pick a vsync mode (CFR/VFR/
 * passthrough/drop, auto-resolved from the muxer flags), compute from the
 * input/output timestamp drift (delta0/delta) how many times to emit the
 * current and previous frames (nb_frames/nb0_frames, duplicating or
 * dropping as needed), track dup/drop statistics, then for each emitted
 * frame handle interlacing flags, forced keyframes (forced_kf_pts list,
 * the forced_keyframes expression, or "source"), and run the avcodec
 * send_frame/receive_packet loop, finally keeping a ref of the last frame
 * for future duplication.  NOTE(review): heavily elided — many control-flow
 * lines (case labels, braces, goto/exit paths) are missing per the embedded
 * numbering; descriptions above cover only what is visible. */
1036 static void do_video_out(OutputFile *of,
1038 AVFrame *next_picture,
1041 int ret, format_video_sync;
1043 AVCodecContext *enc = ost->enc_ctx;
1044 AVCodecParameters *mux_par = ost->st->codecpar;
1045 AVRational frame_rate;
1046 int nb_frames, nb0_frames, i;
1047 double delta, delta0;
1048 double duration = 0;
1050 InputStream *ist = NULL;
1051 AVFilterContext *filter = ost->filter->filter;
1053 if (ost->source_index >= 0)
1054 ist = input_streams[ost->source_index];
1056 frame_rate = av_buffersink_get_frame_rate(filter);
1057 if (frame_rate.num > 0 && frame_rate.den > 0)
1058 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1060 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1061 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1063 if (!ost->filters_script &&
1067 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1068 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1071 if (!next_picture) {
1073 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1074 ost->last_nb0_frames[1],
1075 ost->last_nb0_frames[2]);
1077 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1078 delta = delta0 + duration;
1080 /* by default, we output a single frame */
1081 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1084 format_video_sync = video_sync_method;
1085 if (format_video_sync == VSYNC_AUTO) {
1086 if(!strcmp(of->ctx->oformat->name, "avi")) {
1087 format_video_sync = VSYNC_VFR;
1089 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1091 && format_video_sync == VSYNC_CFR
1092 && input_files[ist->file_index]->ctx->nb_streams == 1
1093 && input_files[ist->file_index]->input_ts_offset == 0) {
1094 format_video_sync = VSYNC_VSCFR;
1096 if (format_video_sync == VSYNC_CFR && copy_ts) {
1097 format_video_sync = VSYNC_VSCFR;
1100 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1104 format_video_sync != VSYNC_PASSTHROUGH &&
1105 format_video_sync != VSYNC_DROP) {
1106 if (delta0 < -0.6) {
1107 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1109 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1110 sync_ipts = ost->sync_opts;
1115 switch (format_video_sync) {
1117 if (ost->frame_number == 0 && delta0 >= 0.5) {
1118 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1121 ost->sync_opts = lrint(sync_ipts);
1124 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1125 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1127 } else if (delta < -1.1)
1129 else if (delta > 1.1) {
1130 nb_frames = lrintf(delta);
1132 nb0_frames = lrintf(delta0 - 0.6);
1138 else if (delta > 0.6)
1139 ost->sync_opts = lrint(sync_ipts);
1142 case VSYNC_PASSTHROUGH:
1143 ost->sync_opts = lrint(sync_ipts);
1150 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1151 nb0_frames = FFMIN(nb0_frames, nb_frames);
1153 memmove(ost->last_nb0_frames + 1,
1154 ost->last_nb0_frames,
1155 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1156 ost->last_nb0_frames[0] = nb0_frames;
1158 if (nb0_frames == 0 && ost->last_dropped) {
1160 av_log(NULL, AV_LOG_VERBOSE,
1161 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1162 ost->frame_number, ost->st->index, ost->last_frame->pts);
1164 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1165 if (nb_frames > dts_error_threshold * 30) {
1166 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1170 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1171 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1172 if (nb_frames_dup > dup_warning) {
1173 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1177 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1179 /* duplicates frame if needed */
1180 for (i = 0; i < nb_frames; i++) {
1181 AVFrame *in_picture;
1182 av_init_packet(&pkt);
1186 if (i < nb0_frames && ost->last_frame) {
1187 in_picture = ost->last_frame;
1189 in_picture = next_picture;
1194 in_picture->pts = ost->sync_opts;
1197 if (!check_recording_time(ost))
1199 if (ost->frame_number >= ost->max_frames)
1203 #if FF_API_LAVF_FMT_RAWPICTURE
1204 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1205 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1206 /* raw pictures are written as AVPicture structure to
1207 avoid any copies. We support temporarily the older
1209 if (in_picture->interlaced_frame)
1210 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1212 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1213 pkt.data = (uint8_t *)in_picture;
1214 pkt.size = sizeof(AVPicture);
1215 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1216 pkt.flags |= AV_PKT_FLAG_KEY;
1218 output_packet(of, &pkt, ost);
1222 int forced_keyframe = 0;
1225 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1226 ost->top_field_first >= 0)
1227 in_picture->top_field_first = !!ost->top_field_first;
1229 if (in_picture->interlaced_frame) {
1230 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1231 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1233 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1235 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1237 in_picture->quality = enc->global_quality;
1238 in_picture->pict_type = 0;
1240 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1241 in_picture->pts * av_q2d(enc->time_base) : NAN;
1242 if (ost->forced_kf_index < ost->forced_kf_count &&
1243 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1244 ost->forced_kf_index++;
1245 forced_keyframe = 1;
1246 } else if (ost->forced_keyframes_pexpr) {
1248 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1249 res = av_expr_eval(ost->forced_keyframes_pexpr,
1250 ost->forced_keyframes_expr_const_values, NULL);
1251 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1252 ost->forced_keyframes_expr_const_values[FKF_N],
1253 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1255 ost->forced_keyframes_expr_const_values[FKF_T],
1256 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1259 forced_keyframe = 1;
1260 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1261 ost->forced_keyframes_expr_const_values[FKF_N];
1262 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1263 ost->forced_keyframes_expr_const_values[FKF_T];
1264 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1267 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1268 } else if ( ost->forced_keyframes
1269 && !strncmp(ost->forced_keyframes, "source", 6)
1270 && in_picture->key_frame==1) {
1271 forced_keyframe = 1;
1274 if (forced_keyframe) {
1275 in_picture->pict_type = AV_PICTURE_TYPE_I;
1276 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1279 update_benchmark(NULL);
1281 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1282 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1283 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1284 enc->time_base.num, enc->time_base.den);
1287 ost->frames_encoded++;
1289 ret = avcodec_send_frame(enc, in_picture);
1294 ret = avcodec_receive_packet(enc, &pkt);
1295 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296 if (ret == AVERROR(EAGAIN))
1302 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1308 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309 pkt.pts = ost->sync_opts;
1311 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1314 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1320 frame_size = pkt.size;
1321 output_packet(of, &pkt, ost);
1323 /* if two pass, output log */
1324 if (ost->logfile && enc->stats_out) {
1325 fprintf(ost->logfile, "%s", enc->stats_out);
1331 * For video, number of frames in == number of packets out.
1332 * But there may be reordering, so we can't throw away frames on encoder
1333 * flush, we need to limit them here, before they go into encoder.
1335 ost->frame_number++;
1337 if (vstats_filename && frame_size)
1338 do_video_stats(ost, frame_size);
1341 if (!ost->last_frame)
1342 ost->last_frame = av_frame_alloc();
1343 av_frame_unref(ost->last_frame);
1344 if (next_picture && ost->last_frame)
1345 av_frame_ref(ost->last_frame, next_picture);
1347 av_frame_free(&ost->last_frame);
1351 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1355 static double psnr(double d)
1357 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video stream to the global
 * -vstats log file (vstats_file).
 * NOTE(review): this listing omits several source lines of the function
 * (e.g. the fopen() failure check and closing braces); the comments below
 * describe only the visible code.
 */
1360 static void do_video_stats(OutputStream *ost, int frame_size)
1362 AVCodecContext *enc;
1364 double ti1, bitrate, avg_bitrate;
1366 /* this is executed just the first time do_video_stats is called */
1368 vstats_file = fopen(vstats_filename, "w");
1376 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy format (<= 1) and the extended
 * one that also prints the output-file and stream indices. */
1378 if (vstats_version <= 1) {
1379 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380 ost->quality / (float)FF_QP2LAMBDA);
1382 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder was asked to collect error stats. */
1386 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1389 fprintf(vstats_file,"f_size= %6d ", frame_size);
1390 /* compute pts value */
1391 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame vs. average over all data so far */
1395 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Forward declaration — defined later in this file; called from
 * reap_filters() and flush_encoders() below. */
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark a stream as finished for both the encoder and the muxer. The visible
 * loop also flags every other stream of the same output file; NOTE(review):
 * the condition guarding that loop is not visible in this listing — confirm
 * against the full source.
 */
1405 static void finish_output_stream(OutputStream *ost)
1407 OutputFile *of = output_files[ost->file_index];
1410 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1413 for (i = 0; i < of->ctx->nb_streams; i++)
1414 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1419 * Get and encode new output from any of the filtergraphs, without causing
 * activity on the filtergraph inputs.
1422 * @return 0 for success, <0 for severe errors
/*
 * Pull every frame currently available from the filtergraph buffer sinks and
 * hand it to the matching encoder via do_video_out()/do_audio_out(). When
 * `flush` is set, an EOF from a video sink triggers a final do_video_out()
 * call with a NULL frame so the encoder can be drained.
 * NOTE(review): this listing omits a number of source lines (loop braces,
 * continue/exit statements, the float_pts assignment head); comments below
 * describe only the visible code.
 */
1424 static int reap_filters(int flush)
1426 AVFrame *filtered_frame = NULL;
1429 /* Reap all buffers present in the buffer sinks */
1430 for (i = 0; i < nb_output_streams; i++) {
1431 OutputStream *ost = output_streams[i];
1432 OutputFile *of = output_files[ost->file_index];
1433 AVFilterContext *filter;
1434 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not (yet) configured. */
1437 if (!ost->filter || !ost->filter->graph->graph)
1439 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time a frame is available. */
1441 if (!ost->initialized) {
1442 char error[1024] = "";
1443 ret = init_output_stream(ost, error, sizeof(error));
1445 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446 ost->file_index, ost->index, error);
1451 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452 return AVERROR(ENOMEM);
1454 filtered_frame = ost->filtered_frame;
1457 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* Non-blocking fetch: NO_REQUEST means we only take what is already queued. */
1458 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1459 AV_BUFFERSINK_FLAG_NO_REQUEST);
1461 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462 av_log(NULL, AV_LOG_WARNING,
1463 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464 } else if (flush && ret == AVERROR_EOF) {
1465 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1466 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1470 if (ost->finished) {
1471 av_frame_unref(filtered_frame);
/* Rescale the frame pts into the encoder time base; float_pts keeps extra
 * fractional bits (extra_bits) for a higher-precision copy of the same value. */
1474 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1476 AVRational filter_tb = av_buffersink_get_time_base(filter);
1477 AVRational tb = enc->time_base;
1478 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1480 tb.den <<= extra_bits;
1482 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1483 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1484 float_pts /= 1 << extra_bits;
1485 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1488 filtered_frame->pts =
1489 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1490 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1492 //if (ost->source_index >= 0)
1493 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1495 switch (av_buffersink_get_type(filter)) {
1496 case AVMEDIA_TYPE_VIDEO:
1497 if (!ost->frame_aspect_ratio.num)
1498 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1501 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1502 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1504 enc->time_base.num, enc->time_base.den);
1507 do_video_out(of, ost, filtered_frame, float_pts);
1509 case AVMEDIA_TYPE_AUDIO:
/* Guard against mid-stream channel-count changes the encoder cannot accept. */
1510 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1511 enc->channels != filtered_frame->channels) {
1512 av_log(NULL, AV_LOG_ERROR,
1513 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1516 do_audio_out(of, ost, filtered_frame);
1519 // TODO support subtitle filters
1523 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-media-type byte totals plus muxing
 * overhead at AV_LOG_INFO, verbose per-stream demux/decode and encode/mux
 * counters at AV_LOG_VERBOSE, and a warning when nothing was encoded.
 * @param total_size total number of bytes written to the output(s)
 * NOTE(review): some source lines (braces, a few conditions) are missing
 * from this listing; comments describe only the visible code.
 */
1530 static void print_final_stats(int64_t total_size)
1532 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1533 uint64_t subtitle_size = 0;
1534 uint64_t data_size = 0;
1535 float percent = -1.0;
/* Accumulate written bytes per media type across all output streams. */
1539 for (i = 0; i < nb_output_streams; i++) {
1540 OutputStream *ost = output_streams[i];
1541 switch (ost->enc_ctx->codec_type) {
1542 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1544 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1545 default: other_size += ost->data_size; break;
1547 extra_size += ost->enc_ctx->extradata_size;
1548 data_size += ost->data_size;
1549 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1550 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead as a percentage of the payload size. */
1554 if (data_size && total_size>0 && total_size >= data_size)
1555 percent = 100.0 * (total_size - data_size) / data_size;
1557 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1558 video_size / 1024.0,
1559 audio_size / 1024.0,
1560 subtitle_size / 1024.0,
1561 other_size / 1024.0,
1562 extra_size / 1024.0);
1564 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1566 av_log(NULL, AV_LOG_INFO, "unknown");
1567 av_log(NULL, AV_LOG_INFO, "\n");
1569 /* print verbose per-stream stats */
1570 for (i = 0; i < nb_input_files; i++) {
1571 InputFile *f = input_files[i];
1572 uint64_t total_packets = 0, total_size = 0;
1574 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1575 i, f->ctx->filename);
1577 for (j = 0; j < f->nb_streams; j++) {
1578 InputStream *ist = input_streams[f->ist_index + j];
1579 enum AVMediaType type = ist->dec_ctx->codec_type;
1581 total_size += ist->data_size;
1582 total_packets += ist->nb_packets;
1584 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1585 i, j, media_type_string(type));
1586 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1587 ist->nb_packets, ist->data_size);
1589 if (ist->decoding_needed) {
1590 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1591 ist->frames_decoded);
1592 if (type == AVMEDIA_TYPE_AUDIO)
1593 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1594 av_log(NULL, AV_LOG_VERBOSE, "; ");
1597 av_log(NULL, AV_LOG_VERBOSE, "\n");
1600 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1601 total_packets, total_size);
/* Same verbose report, now for the output side. */
1604 for (i = 0; i < nb_output_files; i++) {
1605 OutputFile *of = output_files[i];
1606 uint64_t total_packets = 0, total_size = 0;
1608 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1609 i, of->ctx->filename);
1611 for (j = 0; j < of->ctx->nb_streams; j++) {
1612 OutputStream *ost = output_streams[of->ost_index + j];
1613 enum AVMediaType type = ost->enc_ctx->codec_type;
1615 total_size += ost->data_size;
1616 total_packets += ost->packets_written;
1618 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1619 i, j, media_type_string(type));
1620 if (ost->encoding_needed) {
1621 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1622 ost->frames_encoded);
1623 if (type == AVMEDIA_TYPE_AUDIO)
1624 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1625 av_log(NULL, AV_LOG_VERBOSE, "; ");
1628 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1629 ost->packets_written, ost->data_size);
1631 av_log(NULL, AV_LOG_VERBOSE, "\n");
1634 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1635 total_packets, total_size);
/* Warn the user when every byte counter is still zero. */
1637 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1638 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1640 av_log(NULL, AV_LOG_WARNING, "\n");
1642 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic transcoding status line ("frame= ... fps= ... q= ...
 * size= ... time= ... bitrate= ... speed= ...") and, in parallel, the
 * machine-readable key=value progress report written to progress_avio.
 * Non-final updates are rate-limited to one per 500 ms. When this is the
 * final report, print_final_stats() is called at the end.
 * NOTE(review): a number of source lines (braces, early returns, some
 * assignments such as vid/qp) are missing from this listing; comments
 * describe only the visible code.
 */
1647 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1650 AVBPrint buf_script;
1652 AVFormatContext *oc;
1654 AVCodecContext *enc;
1655 int frame_number, vid, i;
1658 int64_t pts = INT64_MIN + 1;
1659 static int64_t last_time = -1;
1660 static int qp_histogram[52];
1661 int hours, mins, secs, us;
/* Nothing to do when neither stderr stats nor -progress output is wanted. */
1665 if (!print_stats && !is_last_report && !progress_avio)
/* Rate limiting: at most one non-final report every 500 ms. */
1668 if (!is_last_report) {
1669 if (last_time == -1) {
1670 last_time = cur_time;
1673 if ((cur_time - last_time) < 500000)
1675 last_time = cur_time;
1678 t = (cur_time-timer_start) / 1000000.0;
1681 oc = output_files[0]->ctx;
1683 total_size = avio_size(oc->pb);
1684 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1685 total_size = avio_tell(oc->pb);
1689 av_bprint_init(&buf_script, 0, 1);
1690 for (i = 0; i < nb_output_streams; i++) {
1692 ost = output_streams[i];
1694 if (!ost->stream_copy)
1695 q = ost->quality / (float) FF_QP2LAMBDA;
/* `vid` tracks whether a video stream was already reported in this pass. */
1697 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1698 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1699 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700 ost->file_index, ost->index, q);
1702 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1705 frame_number = ost->frame_number;
1706 fps = t > 1 ? frame_number / t : 0;
1707 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1708 frame_number, fps < 9.95, fps, q);
1709 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1710 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1711 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1712 ost->file_index, ost->index, q);
1714 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram: one hex digit per bucket, log2-compressed counts. */
1718 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1720 for (j = 0; j < 32; j++)
1721 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* Per-plane (Y/U/V) PSNR when the encoder collected error stats. */
1724 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1726 double error, error_sum = 0;
1727 double scale, scale_sum = 0;
1729 char type[3] = { 'Y','U','V' };
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1731 for (j = 0; j < 3; j++) {
1732 if (is_last_report) {
1733 error = enc->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1736 error = ost->error[j];
1737 scale = enc->width * enc->height * 255.0 * 255.0;
1743 p = psnr(error / scale);
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1745 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1746 ost->file_index, ost->index, type[j] | 32, p);
1748 p = psnr(error_sum / scale_sum);
1749 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1750 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1751 ost->file_index, ost->index, p);
1755 /* compute min output value */
1756 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1757 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1758 ost->st->time_base, AV_TIME_BASE_Q));
1760 nb_frames_drop += ost->last_dropped;
/* Split the (absolute) output position into h:m:s.us components. */
1763 secs = FFABS(pts) / AV_TIME_BASE;
1764 us = FFABS(pts) % AV_TIME_BASE;
1770 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1771 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1773 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1776 "size=%8.0fkB time=", total_size / 1024.0);
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1780 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1781 (100 * us) / AV_TIME_BASE);
1784 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1785 av_bprintf(&buf_script, "bitrate=N/A\n");
1787 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1788 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1791 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1792 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1793 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1794 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1795 hours, mins, secs, us);
1797 if (nb_frames_dup || nb_frames_drop)
1798 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1799 nb_frames_dup, nb_frames_drop);
1800 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1801 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1804 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1805 av_bprintf(&buf_script, "speed=N/A\n");
1807 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1808 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* Human-readable line: '\r' overwrites in place, '\n' on the final report. */
1811 if (print_stats || is_last_report) {
1812 const char end = is_last_report ? '\n' : '\r';
1813 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1814 fprintf(stderr, "%s %c", buf, end);
1816 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* Machine-readable -progress output. */
1821 if (progress_avio) {
1822 av_bprintf(&buf_script, "progress=%s\n",
1823 is_last_report ? "end" : "continue");
1824 avio_write(progress_avio, buf_script.str,
1825 FFMIN(buf_script.len, buf_script.size - 1));
1826 avio_flush(progress_avio);
1827 av_bprint_finalize(&buf_script, NULL);
1828 if (is_last_report) {
1829 if ((ret = avio_closep(&progress_avio)) < 0)
1830 av_log(NULL, AV_LOG_ERROR,
1831 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1836 print_final_stats(total_size);
/*
 * Drain every encoder at end of stream. Streams that never received data are
 * first initialized (their filtergraph may be configured with fallback
 * parameters taken from the demuxer's codecpar), then each encoder is sent a
 * NULL frame and the remaining packets are muxed until AVERROR_EOF.
 * NOTE(review): several source lines (continue statements, braces, exit
 * paths) are missing from this listing; comments describe only the visible
 * code.
 */
1839 static void flush_encoders(void)
1843 for (i = 0; i < nb_output_streams; i++) {
1844 OutputStream *ost = output_streams[i];
1845 AVCodecContext *enc = ost->enc_ctx;
1846 OutputFile *of = output_files[ost->file_index];
1848 if (!ost->encoding_needed)
1851 // Try to enable encoding with no input frames.
1852 // Maybe we should just let encoding fail instead.
1853 if (!ost->initialized) {
1854 FilterGraph *fg = ost->filter->graph;
1855 char error[1024] = "";
1857 av_log(NULL, AV_LOG_WARNING,
1858 "Finishing stream %d:%d without any data written to it.\n",
1859 ost->file_index, ost->st->index);
1861 if (ost->filter && !fg->graph) {
1863 for (x = 0; x < fg->nb_inputs; x++) {
1864 InputFilter *ifilter = fg->inputs[x];
1865 if (ifilter->format < 0) {
1866 AVCodecParameters *par = ifilter->ist->st->codecpar;
1867 // We never got any input. Set a fake format, which will
1868 // come from libavformat.
1869 ifilter->format = par->format;
1870 ifilter->sample_rate = par->sample_rate;
1871 ifilter->channels = par->channels;
1872 ifilter->channel_layout = par->channel_layout;
1873 ifilter->width = par->width;
1874 ifilter->height = par->height;
1875 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1879 if (!ifilter_has_all_input_formats(fg))
1882 ret = configure_filtergraph(fg);
1884 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1888 finish_output_stream(ost);
1891 ret = init_output_stream(ost, error, sizeof(error));
1893 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1894 ost->file_index, ost->index, error);
/* Encoders with frame_size <= 1 (e.g. raw/PCM audio) need no draining. */
1899 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1901 #if FF_API_LAVF_FMT_RAWPICTURE
1902 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1906 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1910 const char *desc = NULL;
1914 switch (enc->codec_type) {
1915 case AVMEDIA_TYPE_AUDIO:
1918 case AVMEDIA_TYPE_VIDEO:
1925 av_init_packet(&pkt);
1929 update_benchmark(NULL);
/* Flush loop: a NULL frame tells the encoder to drain its delayed packets. */
1931 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1932 ret = avcodec_send_frame(enc, NULL);
1934 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1941 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1942 if (ret < 0 && ret != AVERROR_EOF) {
1943 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass log: append the encoder's pass statistics. */
1948 if (ost->logfile && enc->stats_out) {
1949 fprintf(ost->logfile, "%s", enc->stats_out);
1951 if (ret == AVERROR_EOF) {
1954 if (ost->finished & MUXER_FINISHED) {
1955 av_packet_unref(&pkt);
1958 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959 pkt_size = pkt.size;
1960 output_packet(of, &pkt, ost);
1961 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1962 do_video_stats(ost, pkt_size);
1969 * Check whether a packet from ist should be written into ost at this time
/* Returns non-zero when the packet should be written now. NOTE(review):
 * the return statements and any further checks are not visible in this
 * listing — confirm against the full source. */
1971 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1973 OutputFile *of = output_files[ost->file_index];
1974 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Only the stream that feeds this output qualifies. */
1976 if (ost->source_index != ist_index)
/* Packets before the output file's start time are not written. */
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * skip packets before the start time or after the recording time, rescale
 * the timestamps into the muxer time base, optionally run av_parser_change()
 * for a handful of codecs, and hand the result to output_packet().
 * NOTE(review): several source lines (early returns, braces, some else
 * branches) are missing from this listing; comments describe only the
 * visible code.
 */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 OutputFile *of = output_files[ost->file_index];
1991 InputFile *f = input_files [ist->file_index];
1992 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997 av_init_packet(&opkt);
/* The stream must start on a keyframe unless -copyinkf was given. */
1999 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000 !ost->copy_initial_nonkeyframes
/* Without -copypriorss, drop packets that predate the comparison start. */
2003 if (!ost->frame_number && !ost->copy_prior_start) {
2004 int64_t comp_start = start_time;
2005 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007 if (pkt->pts == AV_NOPTS_VALUE ?
2008 ist->pts < comp_start :
2009 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output file's recording time is reached. */
2013 if (of->recording_time != INT64_MAX &&
2014 ist->pts >= of->recording_time + start_time) {
2015 close_output_stream(ost);
/* Same check against the input file's own recording time (-t on input). */
2019 if (f->recording_time != INT64_MAX) {
2020 start_time = f->ctx->start_time;
2021 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022 start_time += f->start_time;
2023 if (ist->pts >= f->recording_time + start_time) {
2024 close_output_stream(ost);
2029 /* force the input stream PTS */
2030 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts from the input stream time base to the muxer time base. */
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 opkt.pts = AV_NOPTS_VALUE;
2038 if (pkt->dts == AV_NOPTS_VALUE)
2039 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042 opkt.dts -= ost_tb_start_time;
/* Audio: derive both timestamps from dts via av_rescale_delta() so that
 * rounding over many packets does not drift. */
2044 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2047 duration = ist->dec_ctx->frame_size;
2048 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2049 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2050 ost->mux_timebase) - ost_tb_start_time;
2053 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 opkt.flags = pkt->flags;
2056 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063 &opkt.data, &opkt.size,
2064 pkt->data, pkt->size,
2065 pkt->flags & AV_PKT_FLAG_KEY);
2067 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-produced data in a refcounted buffer the muxer can own. */
2072 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086 /* store AVPicture in AVPacket, as expected by the output format */
2087 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2093 opkt.data = (uint8_t *)&pict;
2094 opkt.size = sizeof(AVPicture);
2095 opkt.flags |= AV_PKT_FLAG_KEY;
2099 output_packet(of, &opkt, ost);
/*
 * If the decoder reports no channel layout, guess a default one from the
 * channel count (only up to ist->guess_layout_max channels) and log the
 * guess at warning level.
 * NOTE(review): the return statements are not visible in this listing —
 * confirm the exact success/failure values against the full source.
 */
2102 int guess_input_channel_layout(InputStream *ist)
2104 AVCodecContext *dec = ist->dec_ctx;
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
/* Refuse to guess for channel counts above the user-configured limit. */
2109 if (dec->channels > ist->guess_layout_max)
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record decode success/failure statistics and enforce -xerror: when
 * exit_on_error is set, a decode error or a corrupt decoded frame is fatal.
 * NOTE(review): the exit/abort calls themselves are not visible in this
 * listing.
 */
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2127 if (ret < 0 && exit_on_error)
2130 if (exit_on_error && *got_output && ist) {
2131 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
/* Scan every filtergraph input; an audio/video input with format < 0 has not
 * yet received its parameters. NOTE(review): the return statements are not
 * visible in this listing. */
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the input parameters
 * changed (format, dimensions, sample rate, channel layout, hw frames
 * context) or the graph is not configured yet, the new parameters are
 * recorded and the graph is reconfigured; frames that arrive before all
 * inputs know their format are buffered in ifilter->frame_queue.
 * NOTE(review): several source lines (braces, need_reinit updates, returns)
 * are missing from this listing; comments describe only the visible code.
 */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
/* A change in hardware frames context also forces a reinit. */
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164 ifilter->channels != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width != frame->width ||
2169 ifilter->height != frame->height;
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
/* Not all inputs know their format yet: queue a clone of this frame. */
2182 if (!ifilter_has_all_input_formats(fg)) {
2183 AVFrame *tmp = av_frame_clone(frame);
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
/* Grow the FIFO when full before enqueueing the cloned frame pointer. */
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2191 av_frame_free(&tmp);
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph's pending output before reconfiguring. */
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2209 ret = configure_filtergraph(fg);
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Push the frame into the (now configured) buffer source. */
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218 if (ret != AVERROR_EOF)
2219 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal EOF on one filtergraph input. For a configured graph this pushes a
 * NULL frame into the buffer source; for a graph that was never configured,
 * once every input has reached EOF the graph's output streams are finished
 * directly.
 * NOTE(review): a few source lines (the eof flag assignment, returns) are
 * missing from this listing.
 */
2226 static int ifilter_send_eof(InputFilter *ifilter)
2232 if (ifilter->filter) {
/* NULL frame == EOF marker for the buffer source. */
2233 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2237 // the filtergraph was never configured
2238 FilterGraph *fg = ifilter->graph;
2239 for (i = 0; i < fg->nb_inputs; i++)
2240 if (!fg->inputs[i]->eof)
2242 if (i == fg->nb_inputs) {
2243 // All the input streams have finished without the filtergraph
2244 // ever being configured.
2245 // Mark the output streams as finished.
2246 for (j = 0; j < fg->nb_outputs; j++)
2247 finish_output_stream(fg->outputs[j]->ost);
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2257 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin wrapper mapping the send/receive decode API onto the old got_frame
 * convention. NOTE(review): the pkt NULL check, *got_frame updates and
 * return statements are not visible in this listing. */
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Send a decoded frame to every filtergraph input fed by this stream. All
 * but the last filter receive a new reference (via ist->filter_frame); the
 * last one consumes decoded_frame itself. AVERROR_EOF from a filter is
 * deliberately ignored.
 * NOTE(review): braces, the else branch and the return are not visible in
 * this listing.
 */
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet (or drain the decoder when pkt is NULL), update
 * decode statistics and the next_pts/next_dts estimates, choose a time base
 * for the frame pts (stream tb from the frame or packet pts, otherwise
 * ist->dts in AV_TIME_BASE_Q), and forward the frame to the filters.
 * NOTE(review): the parameter list is truncated and several source lines
 * (braces, goto/cleanup paths, the sample-rate divisors) are missing from
 * this listing; comments describe only the visible code.
 */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
/* Lazily allocate the reusable frame buffers on first use. */
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2342 /* increment next_dts to use for the case where the input stream does not
2343 have timestamps or there are multiple frames in the packet */
2344 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2346 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Pick the pts source with decreasing preference: frame pts, packet pts,
 * then the stream's dts estimate in AV_TIME_BASE_Q. */
2350 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351 decoded_frame_tb = ist->st->time_base;
2352 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353 decoded_frame->pts = pkt->pts;
2354 decoded_frame_tb = ist->st->time_base;
2356 decoded_frame->pts = ist->dts;
2357 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale into the 1/sample_rate time base without accumulating drift. */
2359 if (decoded_frame->pts != AV_NOPTS_VALUE)
2360 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362 (AVRational){1, avctx->sample_rate});
2363 ist->nb_samples = decoded_frame->nb_samples;
2364 err = send_frame_to_filters(ist, decoded_frame);
2366 av_frame_unref(ist->filter_frame);
2367 av_frame_unref(decoded_frame);
2368 return err < 0 ? err : ret;
/* Decode one video packet for 'ist' (pkt == NULL drains the decoder),
 * reconstruct a usable timestamp for the produced frame and hand it to
 * the attached filtergraphs via send_frame_to_filters().
 * NOTE(review): several original lines are elided in this chunk; comments
 * below describe only the code that is visible. */
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2374 AVFrame *decoded_frame;
2375 int i, ret = 0, err = 0;
2376 int64_t best_effort_timestamp;
2377 int64_t dts = AV_NOPTS_VALUE;
2380 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2383 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream frame buffers that are reused across calls. */
2386 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387 return AVERROR(ENOMEM);
2388 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389 return AVERROR(ENOMEM);
2390 decoded_frame = ist->decoded_frame;
/* Convert the stream-global DTS (AV_TIME_BASE_Q) into this stream's time base
 * so it can be attached to the packet handed to the decoder. */
2391 if (ist->dts != AV_NOPTS_VALUE)
2392 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2395 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2398 // The old code used to set dts on the drain packet, which does not work
2399 // with the new API anymore.
/* Queue the DTS so it can serve as a fallback timestamp for frames that
 * come out of the decoder during draining (consumed further below). */
2401 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2403 return AVERROR(ENOMEM);
2404 ist->dts_buffer = new;
2405 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2408 update_benchmark(NULL);
2409 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2414 // The following line may be required in some cases where there is no parser
2415 // or the parser does not has_b_frames correctly
2416 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2420 av_log(ist->dec_ctx, AV_LOG_WARNING,
2421 "video_delay is larger in decoder than demuxer %d > %d.\n"
2422 "If you want to help, upload a sample "
2423 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425 ist->dec_ctx->has_b_frames,
2426 ist->st->codecpar->video_delay);
2429 if (ret != AVERROR_EOF)
2430 check_decode_result(ist, got_output, ret);
/* Purely informational: report when decoder context and frame disagree
 * on geometry/pixel format. */
2432 if (*got_output && ret >= 0) {
2433 if (ist->dec_ctx->width != decoded_frame->width ||
2434 ist->dec_ctx->height != decoded_frame->height ||
2435 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437 decoded_frame->width,
2438 decoded_frame->height,
2439 decoded_frame->format,
2440 ist->dec_ctx->width,
2441 ist->dec_ctx->height,
2442 ist->dec_ctx->pix_fmt);
2446 if (!*got_output || ret < 0)
/* Apply the user-forced field order, if any (-top option). */
2449 if(ist->top_field_first>=0)
2450 decoded_frame->top_field_first = ist->top_field_first;
2452 ist->frames_decoded++;
/* Download the frame from the hwaccel surface into system memory when needed. */
2454 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2459 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2461 best_effort_timestamp= decoded_frame->best_effort_timestamp;
/* With a forced CFR input rate (-r before -i), synthesize timestamps. */
2463 if (ist->framerate.num)
2464 best_effort_timestamp = ist->cfr_next_pts++;
/* At EOF, fall back to the oldest buffered packet DTS and pop it (FIFO shift). */
2466 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2467 best_effort_timestamp = ist->dts_buffer[0];
2469 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2470 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2471 ist->nb_dts_buffer--;
2474 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2475 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2477 if (ts != AV_NOPTS_VALUE)
2478 ist->next_pts = ist->pts = ts;
2482 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2483 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2484 ist->st->index, av_ts2str(decoded_frame->pts),
2485 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2486 best_effort_timestamp,
2487 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2488 decoded_frame->key_frame, decoded_frame->pict_type,
2489 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the codec-level one. */
2492 if (ist->st->sample_aspect_ratio.num)
2493 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2495 err = send_frame_to_filters(ist, decoded_frame);
2498 av_frame_unref(ist->filter_frame);
2499 av_frame_unref(decoded_frame);
2500 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix its display duration against
 * the previous subtitle, feed sub2video filtergraphs and subtitle encoders.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
2503 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2506 AVSubtitle subtitle;
2508 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2509 &subtitle, got_output, pkt);
2511 check_decode_result(NULL, got_output, ret);
2513 if (ret < 0 || !*got_output) {
2516 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle's end_display_time so it
 * does not overlap the current one; works one subtitle behind via FFSWAP. */
2520 if (ist->fix_sub_duration) {
2522 if (ist->prev_sub.got_output) {
2523 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2524 1000, AV_TIME_BASE);
2525 if (end < ist->prev_sub.subtitle.end_display_time) {
2526 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2527 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2528 ist->prev_sub.subtitle.end_display_time, end,
2529 end <= 0 ? ", dropping it" : "");
2530 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: emit the previous one, keep the current. */
2533 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2534 FFSWAP(int, ret, ist->prev_sub.ret);
2535 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render immediately if the frame exists, otherwise queue
 * the subtitle until the filtergraph is configured. */
2543 if (ist->sub2video.frame) {
2544 sub2video_update(ist, &subtitle);
2545 } else if (ist->nb_filters) {
2546 if (!ist->sub2video.sub_queue)
2547 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2548 if (!ist->sub2video.sub_queue)
/* Grow the FIFO geometrically when full. */
2550 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2551 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2555 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2559 if (!subtitle.num_rects)
2562 ist->frames_decoded++;
/* Fan the subtitle out to every matching subtitle-encoding output stream. */
2564 for (i = 0; i < nb_output_streams; i++) {
2565 OutputStream *ost = output_streams[i];
2567 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2568 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2571 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2576 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this input stream.
 * NOTE(review): declarations and the error/return lines are elided in this
 * chunk; presumably a failing ifilter_send_eof() aborts the loop — confirm
 * against the full source. */
2580 static int send_filter_eof(InputStream *ist)
2583 for (i = 0; i < ist->nb_filters; i++) {
2584 ret = ifilter_send_eof(ist->filters[i]);
2591 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: establishes/advances the stream's pts/dts
 * bookkeeping, routes the packet to the audio/video/subtitle decoder when
 * decoding is needed, or performs timestamp advancement + do_streamcopy()
 * for stream copy.  Returns nonzero while more input is expected.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
2592 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2596 int eof_reached = 0;
/* First packet: seed ist->dts with the decoder delay expressed in
 * AV_TIME_BASE units (negative offset for streams with B-frames). */
2599 if (!ist->saw_first_ts) {
2600 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2602 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2603 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2604 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2606 ist->saw_first_ts = 1;
2609 if (ist->next_dts == AV_NOPTS_VALUE)
2610 ist->next_dts = ist->dts;
2611 if (ist->next_pts == AV_NOPTS_VALUE)
2612 ist->next_pts = ist->pts;
2616 av_init_packet(&avpkt);
/* Resynchronize our running clocks from the packet's DTS. */
2623 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2624 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2625 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2626 ist->next_pts = ist->pts = ist->dts;
2629 // while we have more to decode or while the decoder did output something on EOF
2630 while (ist->decoding_needed) {
2631 int64_t duration = 0;
2633 int decode_failed = 0;
2635 ist->pts = ist->next_pts;
2636 ist->dts = ist->next_dts;
/* 'repeating' iterations re-enter the decoder without new input (drain). */
2638 switch (ist->dec_ctx->codec_type) {
2639 case AVMEDIA_TYPE_AUDIO:
2640 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2643 case AVMEDIA_TYPE_VIDEO:
2644 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* Estimate the frame duration: packet duration first, then the codec
 * framerate scaled by repeat_pict/ticks_per_frame. */
2646 if (!repeating || !pkt || got_output) {
2647 if (pkt && pkt->duration) {
2648 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2650 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651 duration = ((int64_t)AV_TIME_BASE *
2652 ist->dec_ctx->framerate.den * ticks) /
2653 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2656 if(ist->dts != AV_NOPTS_VALUE && duration) {
2657 ist->next_dts += duration;
2659 ist->next_dts = AV_NOPTS_VALUE;
2663 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2665 case AVMEDIA_TYPE_SUBTITLE:
2668 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2669 if (!pkt && ret >= 0)
2676 if (ret == AVERROR_EOF) {
2682 if (decode_failed) {
2683 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2684 ist->file_index, ist->st->index, av_err2str(ret));
2686 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2687 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2689 if (!decode_failed || exit_on_error)
2695 ist->got_output = 1;
2700 // During draining, we might get multiple output frames in this loop.
2701 // ffmpeg.c does not drain the filter chain on configuration changes,
2702 // which means if we send multiple frames at once to the filters, and
2703 // one of those frames changes configuration, the buffered frames will
2704 // be lost. This can upset certain FATE tests.
2705 // Decode only 1 frame per call on EOF to appease these FATE tests.
2706 // The ideal solution would be to rewrite decoding to use the new
2707 // decoding API in a better way.
2714 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2715 /* except when looping we need to flush but not to send an EOF */
2716 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2717 int ret = send_filter_eof(ist);
2719 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2724 /* handle stream copy */
2725 if (!ist->decoding_needed) {
2726 ist->dts = ist->next_dts;
2727 switch (ist->dec_ctx->codec_type) {
2728 case AVMEDIA_TYPE_AUDIO:
2729 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2730 ist->dec_ctx->sample_rate;
2732 case AVMEDIA_TYPE_VIDEO:
/* Forced input framerate: snap next_dts onto the framerate grid. */
2733 if (ist->framerate.num) {
2734 // TODO: Remove work-around for c99-to-c89 issue 7
2735 AVRational time_base_q = AV_TIME_BASE_Q;
2736 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2737 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2738 } else if (pkt->duration) {
2739 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740 } else if(ist->dec_ctx->framerate.num != 0) {
2741 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2742 ist->next_dts += ((int64_t)AV_TIME_BASE *
2743 ist->dec_ctx->framerate.den * ticks) /
2744 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2748 ist->pts = ist->dts;
2749 ist->next_pts = ist->next_dts;
/* Copy the raw packet to every output stream that stream-copies this input. */
2751 for (i = 0; pkt && i < nb_output_streams; i++) {
2752 OutputStream *ost = output_streams[i];
2754 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2757 do_streamcopy(ist, ost, pkt);
2760 return !eof_reached;
/* Build an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to -sdp_file.  Bails out (elided lines) until
 * every output file has written its header.
 * NOTE(review): allocation-failure handling and the final cleanup lines are
 * elided in this chunk. */
2763 static void print_sdp(void)
2768 AVIOContext *sdp_pb;
2769 AVFormatContext **avc;
/* Wait until all muxers have written their headers before generating SDP. */
2771 for (i = 0; i < nb_output_files; i++) {
2772 if (!output_files[i]->header_written)
2776 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts them. */
2779 for (i = 0, j = 0; i < nb_output_files; i++) {
2780 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2781 avc[j] = output_files[i]->ctx;
2789 av_sdp_create(avc, j, sdp, sizeof(sdp));
2791 if (!sdp_filename) {
2792 printf("SDP:\n%s\n", sdp);
2795 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2796 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2798 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2799 avio_closep(&sdp_pb);
2800 av_freep(&sdp_filename);
2808 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2811 for (i = 0; hwaccels[i].name; i++)
2812 if (hwaccels[i].pix_fmt == pix_fmt)
2813 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format whose accelerator can be
 * initialized (or that matches an explicitly requested hwaccel); otherwise
 * fall through to a software format.
 * NOTE(review): interior lines (e.g. the '!hwaccel' guard and loop exits)
 * are elided in this chunk; comments describe only the visible code. */
2817 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2819 InputStream *ist = s->opaque;
2820 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2823 for (p = pix_fmts; *p != -1; p++) {
2824 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2825 const HWAccel *hwaccel;
/* Software formats are skipped here (handled after the loop, elided). */
2827 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2830 hwaccel = get_hwaccel(*p);
/* Reject accelerators that conflict with an already-active or a user-selected one. */
2832 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2833 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2836 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2838 if (ist->hwaccel_id == hwaccel->id) {
2839 av_log(NULL, AV_LOG_FATAL,
2840 "%s hwaccel requested for input stream #%d:%d, "
2841 "but cannot be initialized.\n", hwaccel->name,
2842 ist->file_index, ist->st->index);
2843 return AV_PIX_FMT_NONE;
/* Propagate the input's hardware frames context to the decoder. */
2848 if (ist->hw_frames_ctx) {
2849 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2850 if (!s->hw_frames_ctx)
2851 return AV_PIX_FMT_NONE;
2854 ist->active_hwaccel_id = hwaccel->id;
2855 ist->hwaccel_pix_fmt = *p;
2862 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2864 InputStream *ist = s->opaque;
2866 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2867 return ist->hwaccel_get_buffer(s, frame, flags);
2869 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream 'ist_index' when decoding is needed and
 * seed its timestamp state.  On failure, writes a human-readable message
 * into 'error' and returns a negative AVERROR code.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
2872 static int init_input_stream(int ist_index, char *error, int error_len)
2875 InputStream *ist = input_streams[ist_index];
2877 if (ist->decoding_needed) {
2878 AVCodec *codec = ist->dec;
2880 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2881 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2882 return AVERROR(EINVAL);
/* Install our callbacks so get_format/get_buffer can drive hwaccel setup. */
2885 ist->dec_ctx->opaque = ist;
2886 ist->dec_ctx->get_format = get_format;
2887 ist->dec_ctx->get_buffer2 = get_buffer;
2888 ist->dec_ctx->thread_safe_callbacks = 1;
2890 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output need exact display times computed. */
2891 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2892 (ist->decoding_needed & DECODING_FOR_OST)) {
2893 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2894 if (ist->decoding_needed & DECODING_FOR_FILTER)
2895 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2898 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2900 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2901 * audio, and video decoders such as cuvid or mediacodec */
2902 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread count unless the user chose one. */
2904 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2905 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2906 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2907 if (ret == AVERROR_EXPERIMENTAL)
2908 abort_codec_experimental(codec, 0);
2910 snprintf(error, error_len,
2911 "Error while opening decoder for input stream "
2913 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options left in the dict were not consumed by the decoder — fatal. */
2916 assert_avoptions(ist->decoder_opts);
2919 ist->next_pts = AV_NOPTS_VALUE;
2920 ist->next_dts = AV_NOPTS_VALUE;
2925 static InputStream *get_input_stream(OutputStream *ost)
2927 if (ost->source_index >= 0)
2928 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns <0, 0 or >0 without the
 * overflow risk of a plain subtraction. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;

    return (x > y) - (x < y);
}
2937 /* open the muxer when all the streams are initialized */
/* Once every output stream of 'of' is initialized, write the container
 * header, optionally emit the SDP, and flush packets that were queued in
 * per-stream muxing FIFOs while waiting.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
2938 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out (elided) while any stream is still uninitialized. */
2942 for (i = 0; i < of->ctx->nb_streams; i++) {
2943 OutputStream *ost = output_streams[of->ost_index + i];
2944 if (!ost->initialized)
2948 of->ctx->interrupt_callback = int_cb;
2950 ret = avformat_write_header(of->ctx, &of->opts);
2952 av_log(NULL, AV_LOG_ERROR,
2953 "Could not write header for output file #%d "
2954 "(incorrect codec parameters ?): %s\n",
2955 file_index, av_err2str(ret));
2958 //assert_avoptions(of->opts);
2959 of->header_written = 1;
2961 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2963 if (sdp_filename || want_sdp)
2966 /* flush the muxing queues */
2967 for (i = 0; i < of->ctx->nb_streams; i++) {
2968 OutputStream *ost = output_streams[of->ost_index + i];
2970 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2971 if (!av_fifo_size(ost->muxing_queue))
2972 ost->mux_timebase = ost->st->time_base;
2974 while (av_fifo_size(ost->muxing_queue)) {
2976 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2977 write_packet(of, &pkt, ost, 1);
/* Initialize the chain of bitstream filters attached to 'ost', threading
 * codec parameters and time base from the stream through each filter, and
 * finally copy the last filter's output parameters back onto the stream.
 * NOTE(review): error-return lines are elided in this chunk. */
2984 static int init_output_bsfs(OutputStream *ost)
2989 if (!ost->nb_bitstream_filters)
2992 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2993 ctx = ost->bsf_ctx[i];
/* Each filter's input is the previous filter's output; the first filter
 * takes the stream's codec parameters. */
2995 ret = avcodec_parameters_copy(ctx->par_in,
2996 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3000 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3002 ret = av_bsf_init(ctx);
3004 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3005 ost->bsf_ctx[i]->filter->name);
/* The chain's final output parameters become the stream's parameters. */
3010 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3011 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3015 ost->st->time_base = ctx->time_base_out;
/* Set up an output stream in stream-copy mode: transfer codec parameters,
 * codec tag, frame rate, time base, duration, disposition and side data
 * from the input stream, plus per-codec-type fixups.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
3020 static int init_output_stream_streamcopy(OutputStream *ost)
3022 OutputFile *of = output_files[ost->file_index];
3023 InputStream *ist = get_input_stream(ost);
3024 AVCodecParameters *par_dst = ost->st->codecpar;
3025 AVCodecParameters *par_src = ost->ref_par;
3028 uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy requires a direct input and no filtergraph. */
3030 av_assert0(ist && !ost->filter);
/* Route the input parameters through enc_ctx so user -c:copy options
 * (e.g. bitstream flags) can still be applied via av_opt_set_dict(). */
3032 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3034 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3036 av_log(NULL, AV_LOG_FATAL,
3037 "Error setting up codec context options.\n");
3040 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the output container can represent it. */
3043 unsigned int codec_tag_tmp;
3044 if (!of->ctx->oformat->codec_tag ||
3045 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3046 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3047 codec_tag = par_src->codec_tag;
3050 ret = avcodec_parameters_copy(par_dst, par_src);
3054 par_dst->codec_tag = codec_tag;
3056 if (!ost->frame_rate.num)
3057 ost->frame_rate = ist->framerate;
3058 ost->st->avg_frame_rate = ost->frame_rate;
3060 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3064 // copy timebase while removing common factors
3065 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3066 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3068 // copy estimated duration as a hint to the muxer
3069 if (ost->st->duration <= 0 && ist->st->duration > 0)
3070 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3073 ost->st->disposition = ist->st->disposition;
/* Clone all stream-level side data packets verbatim. */
3075 if (ist->st->nb_side_data) {
3076 for (i = 0; i < ist->st->nb_side_data; i++) {
3077 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3080 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3082 return AVERROR(ENOMEM);
3083 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-forced rotation replaces any display matrix from the input. */
3087 if (ost->rotate_overridden) {
3088 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3089 sizeof(int32_t) * 9);
3091 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3094 ost->parser = av_parser_init(par_dst->codec_id);
3095 ost->parser_avctx = avcodec_alloc_context3(NULL);
3096 if (!ost->parser_avctx)
3097 return AVERROR(ENOMEM);
3099 switch (par_dst->codec_type) {
3100 case AVMEDIA_TYPE_AUDIO:
3101 if (audio_volume != 256) {
3102 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Some muxers mishandle these block_align values for MP3/AC-3; clear them. */
3105 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3106 par_dst->block_align= 0;
3107 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3108 par_dst->block_align= 0;
3110 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > container SAR > codec SAR. */
3111 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3113 av_mul_q(ost->frame_aspect_ratio,
3114 (AVRational){ par_dst->height, par_dst->width });
3115 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3116 "with stream copy may produce invalid files\n");
3118 else if (ist->st->sample_aspect_ratio.num)
3119 sar = ist->st->sample_aspect_ratio;
3121 sar = par_src->sample_aspect_ratio;
3122 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3123 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3124 ost->st->r_frame_rate = ist->st->r_frame_rate;
3128 ost->mux_timebase = ist->st->time_base;
/* Write an "encoder" metadata tag ("Lavc<version> <codec>") onto the output
 * stream, unless the user already set one or bitexact mode is requested via
 * format or codec flags (then only "Lavc <codec>" without the version).
 * NOTE(review): interior lines are elided in this chunk. */
3133 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3135 AVDictionaryEntry *e;
3137 uint8_t *encoder_string;
3138 int encoder_string_len;
3139 int format_flags = 0;
3140 int codec_flags = 0;
/* Respect a user-provided encoder tag (elided early return). */
3142 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Parse -fflags / -flags option strings to detect +bitexact. */
3145 e = av_dict_get(of->opts, "fflags", NULL, 0);
3147 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3150 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3152 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3154 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3157 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* sizeof(LIBAVCODEC_IDENT) includes the NUL; +2 covers separator slack. */
3160 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3161 encoder_string = av_mallocz(encoder_string_len);
3162 if (!encoder_string)
3165 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3166 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3168 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3169 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
3170 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3171 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames specification 'kf' (comma-separated times,
 * optionally "chapters[+offset]") into a sorted int64_t timestamp array in
 * the encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
3174 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3175 AVCodecContext *avctx)
3178 int n = 1, i, size, index = 0;
/* First pass (elided body): count entries by counting separators. */
3181 for (p = kf; *p; p++)
3185 pts = av_malloc_array(size, sizeof(*pts));
3187 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3192 for (i = 0; i < n; i++) {
3193 char *next = strchr(p, ',');
/* "chapters[+offset]": expand into one keyframe per chapter start. */
3198 if (!memcmp(p, "chapters", 8)) {
3200 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Guard size arithmetic against overflow before growing the array. */
3203 if (avf->nb_chapters > INT_MAX - size ||
3204 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3206 av_log(NULL, AV_LOG_FATAL,
3207 "Could not allocate forced key frames array.\n");
3210 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3211 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3213 for (j = 0; j < avf->nb_chapters; j++) {
3214 AVChapter *c = avf->chapters[j];
3215 av_assert1(index < size);
3216 pts[index++] = av_rescale_q(c->start, c->time_base,
3217 avctx->time_base) + t;
/* Plain timestamp entry. */
3222 t = parse_time_or_die("force_key_frames", p, 1);
3223 av_assert1(index < size);
3224 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keyframe forcing logic expects the timestamps in ascending order. */
3231 av_assert0(index == size);
3232 qsort(pts, size, sizeof(*pts), compare_int64);
3233 ost->forced_kf_count = size;
3234 ost->forced_kf_pts = pts;
/* Choose the encoder time base: a positive -enc_time_base wins; a negative
 * value means "copy from the input stream" (when one exists); otherwise the
 * caller-supplied per-media-type default is used. */
3237 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3239 InputStream *ist = get_input_stream(ost);
3240 AVCodecContext *enc_ctx = ost->enc_ctx;
3241 AVFormatContext *oc;
/* Explicit user-provided time base takes precedence. */
3243 if (ost->enc_timebase.num > 0) {
3244 enc_ctx->time_base = ost->enc_timebase;
/* Negative value requests copying the input stream's time base. */
3248 if (ost->enc_timebase.num < 0) {
3250 enc_ctx->time_base = ist->st->time_base;
/* No input stream to copy from: warn and fall through to the default. */
3254 oc = output_files[ost->file_index]->ctx;
3255 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3258 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an encoding output stream from the
 * filtergraph sink properties (format, rate, dimensions, SAR), pick the
 * frame rate and time base, and handle forced-keyframe options.
 * NOTE(review): interior lines are elided in this chunk; comments describe
 * only the visible code. */
3261 static int init_output_stream_encode(OutputStream *ost)
3263 InputStream *ist = get_input_stream(ost);
3264 AVCodecContext *enc_ctx = ost->enc_ctx;
3265 AVCodecContext *dec_ctx = NULL;
3266 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3269 set_encoder_id(output_files[ost->file_index], ost);
3271 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3272 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3273 // which have to be filtered out to prevent leaking them to output files.
3274 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3277 ost->st->disposition = ist->st->disposition;
3279 dec_ctx = ist->dec_ctx;
3281 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type, mark it as default
 * (audio/video only). */
3283 for (j = 0; j < oc->nb_streams; j++) {
3284 AVStream *st = oc->streams[j];
3285 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3288 if (j == oc->nb_streams)
3289 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3290 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3291 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame-rate priority: -r > buffersink rate > input -r > input r_frame_rate
 * > hard-coded 25 fps fallback. */
3294 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3295 if (!ost->frame_rate.num)
3296 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3297 if (ist && !ost->frame_rate.num)
3298 ost->frame_rate = ist->framerate;
3299 if (ist && !ost->frame_rate.num)
3300 ost->frame_rate = ist->st->r_frame_rate;
3301 if (ist && !ost->frame_rate.num) {
3302 ost->frame_rate = (AVRational){25, 1};
3303 av_log(NULL, AV_LOG_WARNING,
3305 "about the input framerate is available. Falling "
3306 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3307 "if you want a different framerate.\n",
3308 ost->file_index, ost->index);
3310 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the encoder's supported frame rates unless -force_fps is set. */
3311 if (ost->enc->supported_framerates && !ost->force_fps) {
3312 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3313 ost->frame_rate = ost->enc->supported_framerates[idx];
3315 // reduce frame rate for mpeg4 to be within the spec limits
3316 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3317 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3318 ost->frame_rate.num, ost->frame_rate.den, 65535);
3322 switch (enc_ctx->codec_type) {
3323 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters all come from the buffersink feeding the encoder. */
3324 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3326 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3327 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3328 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3329 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3330 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3332 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3335 case AVMEDIA_TYPE_VIDEO:
3336 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3338 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3339 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3340 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3341 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3342 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3343 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale previously parsed forced keyframe times into the final time base. */
3345 for (j = 0; j < ost->forced_kf_count; j++)
3346 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3348 enc_ctx->time_base);
3350 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3351 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3352 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3353 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3354 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3355 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Warn when the auto-chosen pixel format is not the widely compatible
 * yuv420p for H.264 / MPEG-2 outputs. */
3356 if (!strncmp(ost->enc->name, "libx264", 7) &&
3357 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3358 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3359 av_log(NULL, AV_LOG_WARNING,
3360 "No pixel format specified, %s for H.264 encoding chosen.\n"
3361 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3362 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3363 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3364 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3365 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3366 av_log(NULL, AV_LOG_WARNING,
3367 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3368 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3369 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3370 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3372 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3373 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3375 enc_ctx->framerate = ost->frame_rate;
3377 ost->st->avg_frame_rate = ost->frame_rate;
/* (Condition head elided) drop the raw-sample-depth hint when geometry or
 * pixel format changed relative to the decoder. */
3380 enc_ctx->width != dec_ctx->width ||
3381 enc_ctx->height != dec_ctx->height ||
3382 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3383 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" parses an expression, "source" keeps input
 * keyframes, anything else is a static timestamp list. */
3386 if (ost->forced_keyframes) {
3387 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3388 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3389 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3391 av_log(NULL, AV_LOG_ERROR,
3392 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3395 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3396 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3397 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3398 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3400 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3401 // parse it only for static kf timings
3402 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3403 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3407 case AVMEDIA_TYPE_SUBTITLE:
3408 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Text subtitles have no dimensions; inherit them from the input stream. */
3409 if (!enc_ctx->width) {
3410 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3411 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3414 case AVMEDIA_TYPE_DATA:
3421 ost->mux_timebase = enc_ctx->time_base;
3426 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3430 if (ost->encoding_needed) {
3431 AVCodec *codec = ost->enc;
3432 AVCodecContext *dec = NULL;
3435 ret = init_output_stream_encode(ost);
3439 if ((ist = get_input_stream(ost)))
3441 if (dec && dec->subtitle_header) {
3442 /* ASS code assumes this buffer is null terminated so add extra byte. */
3443 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3444 if (!ost->enc_ctx->subtitle_header)
3445 return AVERROR(ENOMEM);
3446 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3447 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3449 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3450 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3451 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3453 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3454 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3455 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3457 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3458 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3459 av_buffersink_get_format(ost->filter->filter)) {
3460 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3461 if (!ost->enc_ctx->hw_frames_ctx)
3462 return AVERROR(ENOMEM);
3465 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3466 if (ret == AVERROR_EXPERIMENTAL)
3467 abort_codec_experimental(codec, 1);
3468 snprintf(error, error_len,
3469 "Error while opening encoder for output stream #%d:%d - "
3470 "maybe incorrect parameters such as bit_rate, rate, width or height",
3471 ost->file_index, ost->index);
3474 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3475 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3476 av_buffersink_set_frame_size(ost->filter->filter,
3477 ost->enc_ctx->frame_size);
3478 assert_avoptions(ost->encoder_opts);
3479 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3480 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3481 " It takes bits/s as argument, not kbits/s\n");
3483 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3485 av_log(NULL, AV_LOG_FATAL,
3486 "Error initializing the output stream codec context.\n");
3490 * FIXME: ost->st->codec should't be needed here anymore.
3492 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3496 if (ost->enc_ctx->nb_coded_side_data) {
3499 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3500 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3503 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3505 return AVERROR(ENOMEM);
3506 memcpy(dst_data, sd_src->data, sd_src->size);
3511 * Add global input side data. For now this is naive, and copies it
3512 * from the input stream's global side data. All side data should
3513 * really be funneled over AVFrame and libavfilter, then added back to
3514 * packet side data, and then potentially using the first packet for
3519 for (i = 0; i < ist->st->nb_side_data; i++) {
3520 AVPacketSideData *sd = &ist->st->side_data[i];
3521 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3523 return AVERROR(ENOMEM);
3524 memcpy(dst, sd->data, sd->size);
3525 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3526 av_display_rotation_set((uint32_t *)dst, 0);
3530 // copy timebase while removing common factors
3531 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3532 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3534 // copy estimated duration as a hint to the muxer
3535 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3536 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3538 ost->st->codec->codec= ost->enc_ctx->codec;
3539 } else if (ost->stream_copy) {
3540 ret = init_output_stream_streamcopy(ost);
3545 * FIXME: will the codec context used by the parser during streamcopy
3546 * This should go away with the new parser API.
3548 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3553 // parse user provided disposition, and update stream values
3554 if (ost->disposition) {
3555 static const AVOption opts[] = {
3556 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3557 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3558 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3559 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3560 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3561 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3562 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3563 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3564 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3565 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3566 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3567 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3568 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3569 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3572 static const AVClass class = {
3574 .item_name = av_default_item_name,
3576 .version = LIBAVUTIL_VERSION_INT,
3578 const AVClass *pclass = &class;
3580 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3585 /* initialize bitstream filters for the output stream
3586 * needs to be done here, because the codec id for streamcopy is not
3587 * known until now */
3588 ret = init_output_bsfs(ost);
3592 ost->initialized = 1;
3594 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3601 static void report_new_stream(int input_index, AVPacket *pkt)
3603 InputFile *file = input_files[input_index];
3604 AVStream *st = file->ctx->streams[pkt->stream_index];
3606 if (pkt->stream_index < file->nb_streams_warn)
3608 av_log(file->ctx, AV_LOG_WARNING,
3609 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3610 av_get_media_type_string(st->codecpar->codec_type),
3611 input_index, pkt->stream_index,
3612 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3613 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop: binds complex-filtergraph
 * outputs to a source input stream, arms framerate emulation, opens all
 * decoders and non-filter-fed encoders, discards unused programs, writes
 * headers for stream-less outputs and dumps the stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): several source lines were elided here (gaps in the embedded
 * numbering — missing braces, continue/goto statements); restore from the
 * upstream file before compiling. */
3616 static int transcode_init(void)
3618 int ret = 0, i, j, k;
3619 AVFormatContext *oc;
3622 char error[1024] = {0};
/* Give complex-graph outputs without a mapped source the input stream that
 * feeds the graph, when the graph has exactly one input. */
3624 for (i = 0; i < nb_filtergraphs; i++) {
3625 FilterGraph *fg = filtergraphs[i];
3626 for (j = 0; j < fg->nb_outputs; j++) {
3627 OutputFilter *ofilter = fg->outputs[j];
3628 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3630 if (fg->nb_inputs != 1)
3632 for (k = nb_input_streams-1; k >= 0 ; k--)
3633 if (fg->inputs[0]->ist == input_streams[k])
3635 ofilter->ost->source_index = k;
3639 /* init framerate emulation */
3640 for (i = 0; i < nb_input_files; i++) {
3641 InputFile *ifile = input_files[i];
3642 if (ifile->rate_emu)
3643 for (j = 0; j < ifile->nb_streams; j++)
3644 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3647 /* init input streams */
3648 for (i = 0; i < nb_input_streams; i++)
3649 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts already opened */
3650 for (i = 0; i < nb_output_streams; i++) {
3651 ost = output_streams[i];
3652 avcodec_close(ost->enc_ctx);
3657 /* open each encoder */
3658 for (i = 0; i < nb_output_streams; i++) {
3659 // skip streams fed from filtergraphs until we have a frame for them
3660 if (output_streams[i]->filter)
3663 ret = init_output_stream(output_streams[i], error, sizeof(error));
3668 /* discard unused programs */
3669 for (i = 0; i < nb_input_files; i++) {
3670 InputFile *ifile = input_files[i];
3671 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3672 AVProgram *p = ifile->ctx->programs[j];
3673 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is actually used */
3675 for (k = 0; k < p->nb_stream_indexes; k++)
3676 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3677 discard = AVDISCARD_DEFAULT;
3680 p->discard = discard;
3684 /* write headers for files with no streams */
3685 for (i = 0; i < nb_output_files; i++) {
3686 oc = output_files[i]->ctx;
3687 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3688 ret = check_init_output_file(output_files[i], i);
3695 /* dump the stream mapping */
3696 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3697 for (i = 0; i < nb_input_streams; i++) {
3698 ist = input_streams[i];
/* report input streams feeding complex filtergraphs */
3700 for (j = 0; j < ist->nb_filters; j++) {
3701 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3702 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3703 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3704 ist->filters[j]->name);
3705 if (nb_filtergraphs > 1)
3706 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3707 av_log(NULL, AV_LOG_INFO, "\n");
3712 for (i = 0; i < nb_output_streams; i++) {
3713 ost = output_streams[i];
3715 if (ost->attachment_filename) {
3716 /* an attached file */
3717 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3718 ost->attachment_filename, ost->file_index, ost->index);
3722 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3723 /* output from a complex graph */
3724 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3725 if (nb_filtergraphs > 1)
3726 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3728 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3729 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping (copy or transcode) */
3733 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3734 input_streams[ost->source_index]->file_index,
3735 input_streams[ost->source_index]->st->index,
3738 if (ost->sync_ist != input_streams[ost->source_index])
3739 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3740 ost->sync_ist->file_index,
3741 ost->sync_ist->st->index);
3742 if (ost->stream_copy)
3743 av_log(NULL, AV_LOG_INFO, " (copy)");
3745 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3746 const AVCodec *out_codec = ost->enc;
3747 const char *decoder_name = "?";
3748 const char *in_codec_name = "?";
3749 const char *encoder_name = "?";
3750 const char *out_codec_name = "?";
3751 const AVCodecDescriptor *desc;
/* print "native" when the decoder/encoder name matches the codec name */
3754 decoder_name = in_codec->name;
3755 desc = avcodec_descriptor_get(in_codec->id);
3757 in_codec_name = desc->name;
3758 if (!strcmp(decoder_name, in_codec_name))
3759 decoder_name = "native";
3763 encoder_name = out_codec->name;
3764 desc = avcodec_descriptor_get(out_codec->id);
3766 out_codec_name = desc->name;
3767 if (!strcmp(encoder_name, out_codec_name))
3768 encoder_name = "native";
3771 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3772 in_codec_name, decoder_name,
3773 out_codec_name, encoder_name);
3775 av_log(NULL, AV_LOG_INFO, "\n");
3779 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* signal other threads (e.g. the progress printer) that init finished */
3783 atomic_store(&transcode_init_done, 1);
3788 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3789 static int need_output(void)
3793 for (i = 0; i < nb_output_streams; i++) {
3794 OutputStream *ost = output_streams[i];
3795 OutputFile *of = output_files[ost->file_index];
3796 AVFormatContext *os = output_files[ost->file_index]->ctx;
3798 if (ost->finished ||
3799 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3801 if (ost->frame_number >= ost->max_frames) {
3803 for (j = 0; j < of->ctx->nb_streams; j++)
3804 close_output_stream(output_streams[of->ost_index + j]);
3815 * Select the output stream to process.
3817 * @return selected output stream, or NULL if none available
3819 static OutputStream *choose_output(void)
3822 int64_t opts_min = INT64_MAX;
3823 OutputStream *ost_min = NULL;
3825 for (i = 0; i < nb_output_streams; i++) {
3826 OutputStream *ost = output_streams[i];
3827 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3828 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3830 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3831 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3833 if (!ost->initialized && !ost->inputs_done)
3836 if (!ost->finished && opts < opts_min) {
3838 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; no-op where
 * termios is unavailable. Failures of tcgetattr are silently ignored. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
/* Poll stdin for interactive commands ('q' quit, '+'/'-' verbosity, 'c'/'C'
 * filter commands, 'd'/'D' debug flags, '?' help, ...). Returns
 * AVERROR_EXIT when the user requested termination, 0 otherwise.
 * NOTE(review): multiple source lines were elided here (gaps in the embedded
 * numbering — e.g. the read_key() call, quit-key test, several braces);
 * restore from the upstream file before compiling. */
3856 static int check_keyboard_interaction(int64_t cur_time)
3859 static int64_t last_time;
3860 if (received_nb_signals)
3861 return AVERROR_EXIT;
3862 /* read_key() returns 0 on EOF */
/* rate-limit key polling to once per 100ms; never poll in daemon mode */
3863 if(cur_time - last_time >= 100000 && !run_as_daemon){
3865 last_time = cur_time;
3869 return AVERROR_EXIT;
3870 if (key == '+') av_log_set_level(av_log_get_level()+10);
3871 if (key == '-') av_log_set_level(av_log_get_level()-10);
3872 if (key == 's') qp_hist ^= 1;
3875 do_hex_dump = do_pkt_dump = 0;
3876 } else if(do_pkt_dump){
3880 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command> [<arg>]" and send/queue it to
 * matching filters ('c' targets only the first supporting filter) */
3882 if (key == 'c' || key == 'C'){
3883 char buf[4096], target[64], command[256], arg[256] = {0};
3886 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3889 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3894 fprintf(stderr, "\n");
3896 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3897 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3898 target, time, command, arg);
3899 for (i = 0; i < nb_filtergraphs; i++) {
3900 FilterGraph *fg = filtergraphs[i];
3903 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3904 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3905 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3906 } else if (key == 'c') {
3907 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3908 ret = AVERROR_PATCHWELCOME;
3910 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3912 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3917 av_log(NULL, AV_LOG_ERROR,
3918 "Parse error, at least 3 arguments were expected, "
3919 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles debug flags, 'd' prompts for an explicit numeric value */
3922 if (key == 'd' || key == 'D'){
3925 debug = input_streams[0]->st->codec->debug<<1;
3926 if(!debug) debug = 1;
3927 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3934 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3939 fprintf(stderr, "\n");
3940 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3941 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every decoder and encoder */
3943 for(i=0;i<nb_input_streams;i++) {
3944 input_streams[i]->st->codec->debug = debug;
3946 for(i=0;i<nb_output_streams;i++) {
3947 OutputStream *ost = output_streams[i];
3948 ost->enc_ctx->debug = debug;
3950 if(debug) av_log_set_level(AV_LOG_DEBUG);
3951 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys) print the interactive help */
3954 fprintf(stderr, "key function\n"
3955 "? show this help\n"
3956 "+ increase verbosity\n"
3957 "- decrease verbosity\n"
3958 "c Send command to first matching filter supporting it\n"
3959 "C Send/Queue command to all matching filters\n"
3960 "D cycle through available debug modes\n"
3961 "h dump packets/hex press to cycle through the 3 states\n"
3963 "s Show QP histogram\n"
3970 static void *input_thread(void *arg)
3973 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3978 ret = av_read_frame(f->ctx, &pkt);
3980 if (ret == AVERROR(EAGAIN)) {
3985 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3988 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3989 if (flags && ret == AVERROR(EAGAIN)) {
3991 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3992 av_log(f->ctx, AV_LOG_WARNING,
3993 "Thread message queue blocking; consider raising the "
3994 "thread_queue_size option (current value: %d)\n",
3995 f->thread_queue_size);
3998 if (ret != AVERROR_EOF)
3999 av_log(f->ctx, AV_LOG_ERROR,
4000 "Unable to send packet to main thread: %s\n",
4002 av_packet_unref(&pkt);
4003 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4011 static void free_input_threads(void)
4015 for (i = 0; i < nb_input_files; i++) {
4016 InputFile *f = input_files[i];
4019 if (!f || !f->in_thread_queue)
4021 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4022 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4023 av_packet_unref(&pkt);
4025 pthread_join(f->thread, NULL);
4027 av_thread_message_queue_free(&f->in_thread_queue);
4031 static int init_input_threads(void)
4035 if (nb_input_files == 1)
4038 for (i = 0; i < nb_input_files; i++) {
4039 InputFile *f = input_files[i];
4041 if (f->ctx->pb ? !f->ctx->pb->seekable :
4042 strcmp(f->ctx->iformat->name, "lavfi"))
4043 f->non_blocking = 1;
4044 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4045 f->thread_queue_size, sizeof(AVPacket));
4049 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4050 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4051 av_thread_message_queue_free(&f->in_thread_queue);
4052 return AVERROR(ret);
4058 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4060 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4062 AV_THREAD_MESSAGE_NONBLOCK : 0);
4066 static int get_input_packet(InputFile *f, AVPacket *pkt)
4070 for (i = 0; i < f->nb_streams; i++) {
4071 InputStream *ist = input_streams[f->ist_index + i];
4072 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4073 int64_t now = av_gettime_relative() - ist->start;
4075 return AVERROR(EAGAIN);
4080 if (nb_input_files > 1)
4081 return get_input_packet_mt(f, pkt);
4083 return av_read_frame(f->ctx, pkt);
4086 static int got_eagain(void)
4089 for (i = 0; i < nb_output_streams; i++)
4090 if (output_streams[i]->unavailable)
4095 static void reset_eagain(void)
4098 for (i = 0; i < nb_input_files; i++)
4099 input_files[i]->eagain = 0;
4100 for (i = 0; i < nb_output_streams; i++)
4101 output_streams[i]->unavailable = 0;
4104 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4105 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4106 AVRational time_base)
4112 return tmp_time_base;
4115 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4118 return tmp_time_base;
/* Rewind input file ifile to its start for -stream_loop: seek to
 * start_time, flush every active decoder, then compute the duration of the
 * just-finished pass (including an estimate for the last frame's length)
 * and fold it into ifile->duration so timestamps keep increasing across
 * loop iterations. Returns 0 or a negative AVERROR from the seek.
 * NOTE(review): several source lines were elided here (gaps in the embedded
 * numbering — e.g. the seek error return, braces, has_audio assignment,
 * loop-counter decrement); restore from the upstream file before compiling. */
4124 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4127 AVCodecContext *avctx;
4128 int i, ret, has_audio = 0;
4129 int64_t duration = 0;
4131 ret = av_seek_frame(is, -1, is->start_time, 0);
/* flush all decoders so no stale frames survive the loop boundary */
4135 for (i = 0; i < ifile->nb_streams; i++) {
4136 ist = input_streams[ifile->ist_index + i];
4137 avctx = ist->dec_ctx;
4140 if (ist->decoding_needed) {
4141 process_input_packet(ist, NULL, 1);
4142 avcodec_flush_buffers(avctx);
4145 /* duration is the length of the last frame in a stream
4146 * when audio stream is present we don't care about
4147 * last video frame length because it's not defined exactly */
4148 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: estimate each stream's total duration for this iteration */
4152 for (i = 0; i < ifile->nb_streams; i++) {
4153 ist = input_streams[ifile->ist_index + i];
4154 avctx = ist->dec_ctx;
/* audio: last-frame length derived from the decoded sample count */
4157 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4158 AVRational sample_rate = {1, avctx->sample_rate};
4160 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame period from -r, else avg_frame_rate, else 1 tick */
4164 if (ist->framerate.num) {
4165 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4166 } else if (ist->st->avg_frame_rate.num) {
4167 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4168 } else duration = 1;
4170 if (!ifile->duration)
4171 ifile->time_base = ist->st->time_base;
4172 /* the total duration of the stream, max_pts - min_pts is
4173 * the duration of the stream without the last frame */
4174 duration += ist->max_pts - ist->min_pts;
4175 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* positive loop counts tick down; negative means loop forever */
4179 if (ifile->loop > 0)
/*
 * Read one packet from the given input file, fix up its timestamps
 * (wrap correction, ts_offset, ts_scale, -stream_loop offsets,
 * discontinuity handling) and feed it to process_input_packet().
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 * this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 *
 * NOTE(review): many source lines were elided in this function (gaps in the
 * embedded numbering — braces, local declarations, discard_packet label,
 * etc.); restore from the upstream file before compiling.
 */
4192 static int process_input(int file_index)
4194 InputFile *ifile = input_files[file_index];
4195 AVFormatContext *is;
4203 ret = get_input_packet(ifile, &pkt);
4205 if (ret == AVERROR(EAGAIN)) {
/* on EOF with -stream_loop active: seek back and try reading again */
4209 if (ret < 0 && ifile->loop) {
4210 if ((ret = seek_to_start(ifile, is)) < 0)
4212 ret = get_input_packet(ifile, &pkt);
4213 if (ret == AVERROR(EAGAIN)) {
4219 if (ret != AVERROR_EOF) {
4220 print_error(is->filename, ret);
/* EOF: flush every decoder of this file and finish non-lavfi outputs */
4225 for (i = 0; i < ifile->nb_streams; i++) {
4226 ist = input_streams[ifile->ist_index + i];
4227 if (ist->decoding_needed) {
4228 ret = process_input_packet(ist, NULL, 0);
4233 /* mark all outputs that don't go through lavfi as finished */
4234 for (j = 0; j < nb_output_streams; j++) {
4235 OutputStream *ost = output_streams[j];
4237 if (ost->source_index == ifile->ist_index + i &&
4238 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4239 finish_output_stream(ost);
4243 ifile->eof_reached = 1;
4244 return AVERROR(EAGAIN);
4250 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4251 is->streams[pkt.stream_index]);
4253 /* the following test is needed in case new streams appear
4254 dynamically in stream : we ignore them */
4255 if (pkt.stream_index >= ifile->nb_streams) {
4256 report_new_stream(file_index, &pkt);
4257 goto discard_packet;
4260 ist = input_streams[ifile->ist_index + pkt.stream_index];
4262 ist->data_size += pkt.size;
4266 goto discard_packet;
4268 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4269 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* debug_ts trace of raw demuxed timestamps before any correction */
4274 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4275 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4276 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4277 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4278 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4279 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4280 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4281 av_ts2str(input_files[ist->file_index]->ts_offset),
4282 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* one-time wrap correction for formats with pts_wrap_bits < 64 */
4285 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4286 int64_t stime, stime2;
4287 // Correcting starttime based on the enabled streams
4288 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4289 // so we instead do it here as part of discontinuity handling
4290 if ( ist->next_dts == AV_NOPTS_VALUE
4291 && ifile->ts_offset == -is->start_time
4292 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4293 int64_t new_start_time = INT64_MAX;
4294 for (i=0; i<is->nb_streams; i++) {
4295 AVStream *st = is->streams[i];
4296 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4298 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4300 if (new_start_time > is->start_time) {
4301 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4302 ifile->ts_offset = -new_start_time;
4306 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4307 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4308 ist->wrap_correction_done = 1;
/* unwrap dts/pts that jumped past the wrap point */
4310 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4311 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4312 ist->wrap_correction_done = 0;
4314 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4315 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4316 ist->wrap_correction_done = 0;
4320 /* add the stream-global side data to the first packet */
4321 if (ist->nb_packets == 1) {
4322 for (i = 0; i < ist->st->nb_side_data; i++) {
4323 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled by autorotate, do not duplicate it */
4326 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4329 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4332 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4336 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the accumulated file offset, then the user -itsscale factor */
4340 if (pkt.dts != AV_NOPTS_VALUE)
4341 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4342 if (pkt.pts != AV_NOPTS_VALUE)
4343 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4345 if (pkt.pts != AV_NOPTS_VALUE)
4346 pkt.pts *= ist->ts_scale;
4347 if (pkt.dts != AV_NOPTS_VALUE)
4348 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen ts */
4350 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4351 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4352 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4353 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4354 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4355 int64_t delta = pkt_dts - ifile->last_ts;
4356 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4357 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4358 ifile->ts_offset -= delta;
4359 av_log(NULL, AV_LOG_DEBUG,
4360 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4361 delta, ifile->ts_offset);
4362 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4363 if (pkt.pts != AV_NOPTS_VALUE)
4364 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* shift timestamps by the accumulated -stream_loop duration */
4368 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4369 if (pkt.pts != AV_NOPTS_VALUE) {
4370 pkt.pts += duration;
4371 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4372 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4375 if (pkt.dts != AV_NOPTS_VALUE)
4376 pkt.dts += duration;
/* per-stream discontinuity check against the predicted next dts */
4378 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4379 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4380 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4381 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4383 int64_t delta = pkt_dts - ist->next_dts;
4384 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4385 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4386 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4387 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4388 ifile->ts_offset -= delta;
4389 av_log(NULL, AV_LOG_DEBUG,
4390 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4391 delta, ifile->ts_offset);
4392 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4393 if (pkt.pts != AV_NOPTS_VALUE)
4394 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly out-of-range dts/pts instead */
4397 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4398 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4399 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4400 pkt.dts = AV_NOPTS_VALUE;
4402 if (pkt.pts != AV_NOPTS_VALUE){
4403 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4404 delta = pkt_pts - ist->next_dts;
4405 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4406 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4407 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4408 pkt.pts = AV_NOPTS_VALUE;
4414 if (pkt.dts != AV_NOPTS_VALUE)
4415 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* debug_ts trace after all corrections */
4418 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4419 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4420 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4421 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4422 av_ts2str(input_files[ist->file_index]->ts_offset),
4423 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4426 sub2video_heartbeat(ist, pkt.pts);
4428 process_input_packet(ist, &pkt, 0);
4431 av_packet_unref(&pkt);
4437 * Perform a step of transcoding for the specified filter graph.
4439 * @param[in] graph filter graph to consider
4440 * @param[out] best_ist input stream where a frame would allow to continue
4441 * @return 0 for success, <0 for error
4443 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4446 int nb_requests, nb_requests_max = 0;
4447 InputFilter *ifilter;
4451 ret = avfilter_graph_request_oldest(graph->graph);
4453 return reap_filters(0);
4455 if (ret == AVERROR_EOF) {
4456 ret = reap_filters(1);
4457 for (i = 0; i < graph->nb_outputs; i++)
4458 close_output_stream(graph->outputs[i]->ost);
4461 if (ret != AVERROR(EAGAIN))
4464 for (i = 0; i < graph->nb_inputs; i++) {
4465 ifilter = graph->inputs[i];
4467 if (input_files[ist->file_index]->eagain ||
4468 input_files[ist->file_index]->eof_reached)
4470 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4471 if (nb_requests > nb_requests_max) {
4472 nb_requests_max = nb_requests;
4478 for (i = 0; i < graph->nb_outputs; i++)
4479 graph->outputs[i]->ost->unavailable = 1;
4485 * Run a single step of transcoding.
4487 * @return 0 for success, <0 for error
4489 static int transcode_step(void)
4492 InputStream *ist = NULL;
4495 ost = choose_output();
4502 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4506 if (ost->filter && !ost->filter->graph->graph) {
4507 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4508 ret = configure_filtergraph(ost->filter->graph);
4510 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4516 if (ost->filter && ost->filter->graph->graph) {
4517 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4521 } else if (ost->filter) {
4523 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4524 InputFilter *ifilter = ost->filter->graph->inputs[i];
4525 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4531 ost->inputs_done = 1;
4535 av_assert0(ost->source_index >= 0);
4536 ist = input_streams[ost->source_index];
4539 ret = process_input(ist->file_index);
4540 if (ret == AVERROR(EAGAIN)) {
4541 if (input_files[ist->file_index]->eagain)
4542 ost->unavailable = 1;
4547 return ret == AVERROR_EOF ? 0 : ret;
4549 return reap_filters(0);
/*
 * The following code is the main loop of the file converter
 *
 * Drives transcode_init(), the per-step loop (keyboard handling, progress
 * reports, transcode_step()), then performs end-of-stream flushing,
 * trailer writing and per-stream/per-file cleanup. Returns 0 on success
 * or a negative AVERROR code.
 * NOTE(review): many source lines were elided here (gaps in the embedded
 * numbering — locals, braces, goto fail statements, the HAVE_PTHREADS
 * guards); restore from the upstream file before compiling.
 */
4555 static int transcode(void)
4558 AVFormatContext *os;
4561 int64_t timer_start;
4562 int64_t total_packets_written = 0;
4564 ret = transcode_init();
4568 if (stdin_interaction) {
4569 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4572 timer_start = av_gettime_relative();
4575 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until done or signaled */
4579 while (!received_sigterm) {
4580 int64_t cur_time= av_gettime_relative();
4582 /* if 'q' pressed, exits */
4583 if (stdin_interaction)
4584 if (check_keyboard_interaction(cur_time) < 0)
4587 /* check if there's any stream where output is still needed */
4588 if (!need_output()) {
4589 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4593 ret = transcode_step();
4594 if (ret < 0 && ret != AVERROR_EOF) {
4596 av_strerror(ret, errbuf, sizeof(errbuf));
4598 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4602 /* dump report by using the output first video and audio streams */
4603 print_report(0, timer_start, cur_time);
4606 free_input_threads();
4609 /* at the end of stream, we must flush the decoder buffers */
4610 for (i = 0; i < nb_input_streams; i++) {
4611 ist = input_streams[i];
4612 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4613 process_input_packet(ist, NULL, 0);
4620 /* write the trailer if needed and close file */
4621 for (i = 0; i < nb_output_files; i++) {
4622 os = output_files[i]->ctx;
4623 if (!output_files[i]->header_written) {
4624 av_log(NULL, AV_LOG_ERROR,
4625 "Nothing was written into output file %d (%s), because "
4626 "at least one of its streams received no packets.\n",
4630 if ((ret = av_write_trailer(os)) < 0) {
4631 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4637 /* dump report by using the first video and audio streams */
4638 print_report(1, timer_start, av_gettime_relative());
4640 /* close each encoder */
4641 for (i = 0; i < nb_output_streams; i++) {
4642 ost = output_streams[i];
4643 if (ost->encoding_needed) {
4644 av_freep(&ost->enc_ctx->stats_in);
4646 total_packets_written += ost->packets_written;
4649 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4650 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4654 /* close each decoder */
4655 for (i = 0; i < nb_input_streams; i++) {
4656 ist = input_streams[i];
4657 if (ist->decoding_needed) {
4658 avcodec_close(ist->dec_ctx);
4659 if (ist->hwaccel_uninit)
4660 ist->hwaccel_uninit(ist->dec_ctx);
4664 av_buffer_unref(&hw_device_ctx);
/* error path / shared cleanup */
4671 free_input_threads();
4674 if (output_streams) {
4675 for (i = 0; i < nb_output_streams; i++) {
4676 ost = output_streams[i];
4679 if (fclose(ost->logfile))
4680 av_log(NULL, AV_LOG_ERROR,
4681 "Error closing logfile, loss of information possible: %s\n",
4682 av_err2str(AVERROR(errno)));
4683 ost->logfile = NULL;
4685 av_freep(&ost->forced_kf_pts);
4686 av_freep(&ost->apad);
4687 av_freep(&ost->disposition);
4688 av_dict_free(&ost->encoder_opts);
4689 av_dict_free(&ost->sws_dict);
4690 av_dict_free(&ost->swr_opts);
4691 av_dict_free(&ost->resample_opts);
/**
 * Return the user CPU time consumed by this process, in microseconds.
 *
 * Restores the conditional-compilation structure that was mangled in this
 * excerpt: the dangling "#elif HAVE_GETPROCESSTIMES" requires a preceding
 * "#if", and the function body lost its braces.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    /* POSIX: ru_utime holds user time as seconds + microseconds;
     * fold both into a single microsecond count. */
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    /* Windows: GetProcessTimes() reports FILETIMEs in 100 ns units;
     * divide by 10 to convert to microseconds. */
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    /* No CPU-time API available: fall back to a wall-clock timestamp. */
    return av_gettime_relative();
#endif
}
/**
 * Return the peak memory usage of this process in bytes
 * (maximum resident set size on POSIX, peak pagefile usage on Windows),
 * or 0 when no query API is available.
 *
 * Restores the braces and the trailing "#else return 0; #endif" that were
 * mangled in this excerpt, matching the #if/#elif structure shown.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    /* ru_maxrss is reported in kilobytes on Linux; scale to bytes. */
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    /* Windows: query the process memory counters (psapi).
     * cb must be set to the structure size before the call. */
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/**
 * No-op libav* log callback. Installed via av_log_set_callback() when
 * ffmpeg is started with "-d" (daemon mode, see main()) to suppress all
 * console log output. The empty body elided in this excerpt is restored.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
/*
 * Entry point: parse options, open inputs/outputs, run the transcode loop.
 * NOTE(review): this excerpt elides lines — local declarations (i, ret, ti),
 * the exit_program() calls after fatal errors, #if guards around optional
 * component registration, and several closing braces are not visible here.
 */
int main(int argc, char **argv)
    /* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* Apply -loglevel before the main option pass, so option parsing
     * itself logs at the requested verbosity. */
    parse_loglevel(argc, argv, options);

    /* "-d": daemon mode — silence all logging via the no-op callback. */
    if(argc>1 && !strcmp(argv[1], "-d")){
        av_log_set_callback(log_callback_null);

    /* Register all codecs, devices and filters, then init networking. */
    avcodec_register_all();
    avdevice_register_all();
    avfilter_register_all();
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);

    /* Invoked with no files at all: point the user at the help. */
    if (nb_output_files <= 0 && nb_input_files == 0) {
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");

    /* Scan outputs for non-RTP muxers; NOTE(review): the loop body
     * (presumably clearing an SDP flag) is not visible in this excerpt. */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))

    /* Time the whole run in user-CPU microseconds (see getutime()). */
    current_time = ti = getutime();
    if (transcode() < 0)
    ti = getutime() - ti;
    av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);

    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* Fail the run when the decoding-error fraction exceeds max_error_rate
     * (exit call on the following, elided line). */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])

    exit_program(received_nb_signals ? 255 : main_return_code);
    /* Normally unreachable — exit_program() terminates the process. */
    return main_return_code;