2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity, consumed by cmdutils for the banner/version output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Handle for the -vstats log file; closed (with error check) in ffmpeg_cleanup(). */
115 static FILE *vstats_file;
/* Names of the constants usable inside -force_key_frames expressions. */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime bookkeeping: frame duplication/drop counters and decode-error stats. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
/* Threshold at which a "More than N frames duplicated" warning is emitted. */
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle payloads (allocated on demand in
 * do_subtitle_out(), freed in ffmpeg_cleanup()). */
142 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static because they are shared with the
 * other ffmpeg CLI translation units. */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe() when
 * restore_tty is set. */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank (fully transparent) RGB32 canvas in ist->sub2video.frame,
 * onto which subtitle rectangles are later blitted. Dimensions come from the
 * decoder when it reports them, otherwise from the pre-configured sub2video
 * size. NOTE(review): excerpt is missing some lines of this function
 * (declarations/returns); comments describe only what is visible. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before re-configuring the frame. */
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the whole canvas to zero (transparent black in RGB32). */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas, converting its
 * palettized pixels through the rectangle's palette (r->data[1]).
 * dst/dst_linesize describe the canvas, w/h its bounds; rectangles that are
 * not bitmaps or fall outside the canvas are warned about and skipped. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
/* Only bitmap subtitles can be rendered onto the canvas. */
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: reject rectangles overflowing the canvas. */
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner (4 bytes per RGB32 pixel). */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
/* Palette lookup per pixel, row by row. */
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Push the current sub2video canvas frame (stamped with pts) into every
 * filtergraph input fed by this stream. KEEP_REF leaves ownership of the
 * frame with ist so it can be re-sent on the next heartbeat. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
/* Remember when the canvas was last sent, for heartbeat pacing. */
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into the
 * filtergraphs. With sub == NULL (heartbeat/flush path) an empty canvas is
 * sent instead, timed from the previous subtitle's end_pts.
 * NOTE(review): excerpt omits the NULL-sub branch details; the visible code
 * covers the normal rendering path. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* Convert the subtitle's display window from AV_TIME_BASE to the stream
 * time base (display times are in milliseconds, hence * 1000). */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
/* Blit every subtitle rectangle onto the canvas, then publish it. */
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams "alive": whenever a packet is read from a file,
 * re-send the current subtitle canvas on every sub2video stream of the same
 * file whose filters are starving, so filters (e.g. overlay) never stall
 * waiting for a subtitle frame. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video inputs. */
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired (or none rendered yet): refresh the canvas. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* Only push if at least one downstream buffersrc actually asked for data. */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit one final (blank)
 * update if the last subtitle had a finite end time, then send NULL frames
 * to signal EOF to every connected buffersrc filter. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved in
 * oldtty. Called from exit/signal paths, hence only signal-safe calls here. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit state shared between the signal handlers and the main code.
 * transcode_init_done is atomic because it is read from the interrupt
 * callback while the main thread writes it. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many were received.
 * After more than 3 signals, abandon graceful shutdown and hard-exit
 * (write(2) is used because it is async-signal-safe, unlike av_log). */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+Break behaves like an interactive interrupt. */
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): the enclosing function header is not visible in this
 * excerpt; this fragment appears to be terminal/signal initialization
 * (presumably term_init) — confirm against the full file. */
/* Put the controlling tty into a raw-ish mode so single keypresses can be
 * read, but only when interacting with a real stdin. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
/* Route termination-style signals through sigterm_handler for a
 * graceful shutdown. */
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* POSIX path uses select() with (presumably) a zero timeout; the Windows
 * path polls the console/pipe via PeekNamedPipe. NOTE(review): several
 * lines of this function are missing from the excerpt. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
/* Lazily resolve stdin; GetConsoleMode failing means stdin is a pipe. */
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been
 * received. Before init completes (transcode_init_done == 0) a single
 * signal aborts; afterwards it takes more than one. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
/* Shared interrupt callback handed to lavf contexts. */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered as the exit handler: frees every filtergraph,
 * output file/stream, input file/stream and ancillary buffer, then prints a
 * final status line. ret is the pending process exit code. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, queued frames/subtitles, names --- */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
/* Drain and free any frames still queued for this filter input. */
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
/* Same for subtitles buffered for sub2video. */
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* --- output files: close muxer contexts and their options --- */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
/* Only close the AVIO handle for formats that actually opened a file. */
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, frames, codec contexts --- */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
539 av_freep(&ost->bsf_extradata_updated);
541 av_frame_free(&ost->filtered_frame);
542 av_frame_free(&ost->last_frame);
543 av_dict_free(&ost->encoder_opts);
545 av_parser_close(ost->parser);
546 avcodec_free_context(&ost->parser_avctx);
548 av_freep(&ost->forced_keyframes);
549 av_expr_free(ost->forced_keyframes_pexpr);
550 av_freep(&ost->avfilter);
551 av_freep(&ost->logfile_prefix);
553 av_freep(&ost->audio_channels_map);
554 ost->audio_channels_mapped = 0;
556 av_dict_free(&ost->sws_dict);
558 avcodec_free_context(&ost->enc_ctx);
559 avcodec_parameters_free(&ost->ref_par);
/* Drain packets still queued while the muxer header was pending. */
561 if (ost->muxing_queue) {
562 while (av_fifo_size(ost->muxing_queue)) {
564 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
565 av_packet_unref(&pkt);
567 av_fifo_freep(&ost->muxing_queue);
570 av_freep(&output_streams[i]);
/* --- inputs: stop reader threads first, then close demuxers --- */
573 free_input_threads();
575 for (i = 0; i < nb_input_files; i++) {
576 avformat_close_input(&input_files[i]->ctx);
577 av_freep(&input_files[i]);
579 for (i = 0; i < nb_input_streams; i++) {
580 InputStream *ist = input_streams[i];
582 av_frame_free(&ist->decoded_frame);
583 av_frame_free(&ist->filter_frame);
584 av_dict_free(&ist->decoder_opts);
585 avsubtitle_free(&ist->prev_sub.subtitle);
586 av_frame_free(&ist->sub2video.frame);
587 av_freep(&ist->filters);
588 av_freep(&ist->hwaccel_device);
589 av_freep(&ist->dts_buffer);
591 avcodec_free_context(&ist->dec_ctx);
593 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost -vstats data. */
597 if (fclose(vstats_file))
598 av_log(NULL, AV_LOG_ERROR,
599 "Error closing vstats file, loss of information possible: %s\n",
600 av_err2str(AVERROR(errno)));
602 av_freep(&vstats_filename);
604 av_freep(&input_streams);
605 av_freep(&input_files);
606 av_freep(&output_streams);
607 av_freep(&output_files);
611 avformat_network_deinit();
/* Final status: distinguish signal-triggered exit from a conversion error. */
613 if (received_sigterm) {
614 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
615 (int) received_sigterm);
616 } else if (ret && atomic_load(&transcode_init_done)) {
617 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every option key that also appears in b (case-sensitive
 * match). Used to strip consumed options from a leftover dictionary. */
623 void remove_avoptions(AVDictionary **a, AVDictionary *b)
625 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
627 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was left unconsumed, i.e. the
 * user passed an option no component recognized. */
632 void assert_avoptions(AVDictionary *m)
634 AVDictionaryEntry *t;
635 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
641 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when enabled, log the CPU time elapsed since the
 * previous call, labelled by the printf-style fmt/args. Calling it with
 * fmt == NULL (re)starts the timer without logging. */
646 static void update_benchmark(const char *fmt, ...)
648 if (do_benchmark_all) {
649 int64_t t = getutime();
655 vsnprintf(buf, sizeof(buf), fmt, va);
657 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: the stream that triggered the shutdown
 * gets this_stream's flags, all the others get the others flags. */
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
666 for (i = 0; i < nb_output_streams; i++) {
667 OutputStream *ost2 = output_streams[i];
668 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: enforce -frames limits, buffer packets
 * until the muxer header is written, rescale timestamps to the stream time
 * base, sanitize non-monotonic DTS, and hand the packet to
 * av_interleaved_write_frame(). unqueue != 0 means the packet was previously
 * buffered and must not be counted against max_frames again. */
672 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
674 AVFormatContext *s = of->ctx;
675 AVStream *st = ost->st;
679 * Audio encoders may split the packets -- #frames in != #packets out.
680 * But there is no reordering, so we can limit the number of output packets
681 * by simply dropping them here.
682 * Counting encoded video frames needs to be done separately because of
683 * reordering, see do_video_out().
684 * Do not count the packet when unqueued because it has been counted when queued.
686 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
687 if (ost->frame_number >= ost->max_frames) {
688 av_packet_unref(pkt);
694 if (!of->header_written) {
695 AVPacket tmp_pkt = {0};
696 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue (capped at max_muxing_queue_size); error out if full. */
697 if (!av_fifo_space(ost->muxing_queue)) {
698 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
699 ost->max_muxing_queue_size);
700 if (new_size <= av_fifo_size(ost->muxing_queue)) {
701 av_log(NULL, AV_LOG_ERROR,
702 "Too many packets buffered for output stream %d:%d.\n",
703 ost->file_index, ost->st->index);
706 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Queue a new reference; the caller's packet is released either way. */
710 ret = av_packet_ref(&tmp_pkt, pkt);
713 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
714 av_packet_unref(pkt);
/* VSYNC_DROP video / negative async audio: discard timestamps entirely. */
718 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
719 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
720 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Pull encoder quality/error stats out of the packet side data. */
722 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
724 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
726 ost->quality = sd ? AV_RL32(sd) : -1;
727 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
729 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
731 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the duration implied by the frame rate. */
736 if (ost->frame_rate.num && ost->is_cfr) {
737 if (pkt->duration > 0)
738 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
739 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
744 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
746 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; replace dts by the median of pts, dts and
 * last_mux_dts+1 (the FFMIN3/FFMAX3 pair computes the middle value). */
747 if (pkt->dts != AV_NOPTS_VALUE &&
748 pkt->pts != AV_NOPTS_VALUE &&
749 pkt->dts > pkt->pts) {
750 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
752 ost->file_index, ost->st->index);
754 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
755 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
756 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce monotonically increasing DTS (strictly, unless the muxer
 * allows equal timestamps via AVFMT_TS_NONSTRICT). VP9 stream copy is
 * exempted. */
758 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
759 pkt->dts != AV_NOPTS_VALUE &&
760 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
761 ost->last_mux_dts != AV_NOPTS_VALUE) {
762 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
763 if (pkt->dts < max) {
764 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
765 av_log(s, loglevel, "Non-monotonous DTS in output stream "
766 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
767 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
769 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
772 av_log(s, loglevel, "changing to %"PRId64". This may result "
773 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to the minimum allowed. */
775 if (pkt->pts >= pkt->dts)
776 pkt->pts = FFMAX(pkt->pts, max);
781 ost->last_mux_dts = pkt->dts;
783 ost->data_size += pkt->size;
784 ost->packets_written++;
786 pkt->stream_index = ost->index;
789 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
790 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
791 av_get_media_type_string(ost->enc_ctx->codec_type),
792 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
793 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* On a write failure, finish this stream's muxing and all encoders. */
798 ret = av_interleaved_write_frame(s, pkt);
800 print_error("av_interleaved_write_frame()", ret);
801 main_return_code = 1;
802 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
804 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and shrink the owning file's
 * recording_time so the other streams stop at the same point. */
807 static void close_output_stream(OutputStream *ost)
809 OutputFile *of = output_files[ost->file_index];
811 ost->finished |= ENCODER_FINISHED;
/* Convert this stream's reached position to AV_TIME_BASE for comparison. */
813 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
814 of->recording_time = FFMIN(of->recording_time, end);
/* Route a packet to the muxer, running it through the stream's bitstream
 * filter chain first (if any). Packets drained from each bsf are either fed
 * to the next bsf in the chain or written out. */
818 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
822 /* apply the output bitstream filters, if any */
823 if (ost->nb_bitstream_filters) {
824 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
832 /* get a packet from the previous filter up the chain */
833 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
834 if (ret == AVERROR(EAGAIN)) {
840 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
841 * the api states this shouldn't happen after init(). Propagate it here to the
842 * muxer and to the next filters in the chain to workaround this.
843 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
844 * par_out->extradata and adapt muxers accordingly to get rid of this. */
845 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
846 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 0 = muxer params already synced for this bsf. */
849 ost->bsf_extradata_updated[idx - 1] |= 1;
852 /* send it to the next filter down the chain or to the muxer */
853 if (idx < ost->nb_bitstream_filters) {
854 /* HACK/FIXME! - See above */
855 if (!(ost->bsf_extradata_updated[idx] & 2)) {
856 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 1 = downstream bsf params already synced. */
859 ost->bsf_extradata_updated[idx] |= 2;
861 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
866 write_packet(of, pkt, ost, 0);
/* No bitstream filters: write the packet straight through. */
869 write_packet(of, pkt, ost, 0);
872 if (ret < 0 && ret != AVERROR_EOF) {
873 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
874 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still emit output: if the owning file's
 * -t limit has been reached, close the stream. (Return statements are not
 * visible in this excerpt.) */
880 static int check_recording_time(OutputStream *ost)
882 OutputFile *of = output_files[ost->file_index];
/* Compare elapsed stream time (encoder time base) against recording_time
 * (AV_TIME_BASE units) without converting either to the other's base. */
884 if (of->recording_time != INT64_MAX &&
885 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
886 AV_TIME_BASE_Q) >= 0) {
887 close_output_stream(ost);
/* Encode one filtered audio frame and mux the resulting packet(s) via
 * output_packet(). Uses the send_frame/receive_packet encode API; a single
 * frame may yield several packets. */
893 static void do_audio_out(OutputFile *of, OutputStream *ost,
896 AVCodecContext *enc = ost->enc_ctx;
900 av_init_packet(&pkt);
/* Stop if the output file's -t recording limit has been hit. */
904 if (!check_recording_time(ost))
/* Without a usable pts (or with -async < 0), synthesize one from the
 * running sample count in sync_opts. */
907 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
908 frame->pts = ost->sync_opts;
909 ost->sync_opts = frame->pts + frame->nb_samples;
910 ost->samples_encoded += frame->nb_samples;
911 ost->frames_encoded++;
913 av_assert0(pkt.size || !pkt.data);
914 update_benchmark(NULL);
916 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
917 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
918 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
919 enc->time_base.num, enc->time_base.den);
922 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready; EAGAIN means "feed more". */
927 ret = avcodec_receive_packet(enc, &pkt);
928 if (ret == AVERROR(EAGAIN))
933 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder timestamps -> muxing time base before handing off. */
935 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
938 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
939 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
940 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
941 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
944 output_packet(of, &pkt, ost);
949 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (one packet draws the subtitle, a second clears it), other codecs once.
 * The encoded payload goes through the shared subtitle_out scratch buffer. */
953 static void do_subtitle_out(OutputFile *of,
957 int subtitle_out_max_size = 1024 * 1024;
958 int subtitle_out_size, nb, i;
/* Subtitles without a pts cannot be scheduled; refuse them. */
963 if (sub->pts == AV_NOPTS_VALUE) {
964 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB output buffer. */
973 subtitle_out = av_malloc(subtitle_out_max_size);
975 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
980 /* Note: DVB subtitle need one packet to draw them and one other
981 packet to clear them */
982 /* XXX: signal it in the codec context ? */
983 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
988 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
990 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
991 pts -= output_files[ost->file_index]->start_time;
/* nb is 1, or 2 for DVB (draw + clear passes). */
992 for (i = 0; i < nb; i++) {
993 unsigned save_num_rects = sub->num_rects;
995 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
996 if (!check_recording_time(ost))
1000 // start_display_time is required to be 0
/* Fold start_display_time (ms) into pts and rebase the display window. */
1001 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1002 sub->end_display_time -= sub->start_display_time;
1003 sub->start_display_time = 0;
1007 ost->frames_encoded++;
1009 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1010 subtitle_out_max_size, sub);
/* Restore num_rects (the encode pass may have altered it). */
1012 sub->num_rects = save_num_rects;
1013 if (subtitle_out_size < 0) {
1014 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Wrap the encoded payload in a packet; duration comes from the display
 * window (milliseconds) rescaled to the muxing time base. */
1018 av_init_packet(&pkt);
1019 pkt.data = subtitle_out;
1020 pkt.size = subtitle_out_size;
1021 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1022 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1023 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1024 /* XXX: the pts correction is handled here. Maybe handling
1025 it in the codec would be better */
/* First DVB pass is stamped at display start, second at display end. */
1027 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1032 output_packet(of, &pkt, ost);
1036 static void do_video_out(OutputFile *of,
1038 AVFrame *next_picture,
1041 int ret, format_video_sync;
1043 AVCodecContext *enc = ost->enc_ctx;
1044 AVCodecParameters *mux_par = ost->st->codecpar;
1045 AVRational frame_rate;
1046 int nb_frames, nb0_frames, i;
1047 double delta, delta0;
1048 double duration = 0;
1050 InputStream *ist = NULL;
1051 AVFilterContext *filter = ost->filter->filter;
1053 if (ost->source_index >= 0)
1054 ist = input_streams[ost->source_index];
1056 frame_rate = av_buffersink_get_frame_rate(filter);
1057 if (frame_rate.num > 0 && frame_rate.den > 0)
1058 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1060 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1061 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1063 if (!ost->filters_script &&
1067 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1068 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1071 if (!next_picture) {
1073 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1074 ost->last_nb0_frames[1],
1075 ost->last_nb0_frames[2]);
1077 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1078 delta = delta0 + duration;
1080 /* by default, we output a single frame */
1081 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1084 format_video_sync = video_sync_method;
1085 if (format_video_sync == VSYNC_AUTO) {
1086 if(!strcmp(of->ctx->oformat->name, "avi")) {
1087 format_video_sync = VSYNC_VFR;
1089 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1091 && format_video_sync == VSYNC_CFR
1092 && input_files[ist->file_index]->ctx->nb_streams == 1
1093 && input_files[ist->file_index]->input_ts_offset == 0) {
1094 format_video_sync = VSYNC_VSCFR;
1096 if (format_video_sync == VSYNC_CFR && copy_ts) {
1097 format_video_sync = VSYNC_VSCFR;
1100 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1104 format_video_sync != VSYNC_PASSTHROUGH &&
1105 format_video_sync != VSYNC_DROP) {
1106 if (delta0 < -0.6) {
1107 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1109 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1110 sync_ipts = ost->sync_opts;
1115 switch (format_video_sync) {
1117 if (ost->frame_number == 0 && delta0 >= 0.5) {
1118 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1121 ost->sync_opts = lrint(sync_ipts);
1124 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1125 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1127 } else if (delta < -1.1)
1129 else if (delta > 1.1) {
1130 nb_frames = lrintf(delta);
1132 nb0_frames = lrintf(delta0 - 0.6);
1138 else if (delta > 0.6)
1139 ost->sync_opts = lrint(sync_ipts);
1142 case VSYNC_PASSTHROUGH:
1143 ost->sync_opts = lrint(sync_ipts);
1150 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1151 nb0_frames = FFMIN(nb0_frames, nb_frames);
1153 memmove(ost->last_nb0_frames + 1,
1154 ost->last_nb0_frames,
1155 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1156 ost->last_nb0_frames[0] = nb0_frames;
1158 if (nb0_frames == 0 && ost->last_dropped) {
1160 av_log(NULL, AV_LOG_VERBOSE,
1161 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1162 ost->frame_number, ost->st->index, ost->last_frame->pts);
1164 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1165 if (nb_frames > dts_error_threshold * 30) {
1166 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1170 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1171 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1172 if (nb_frames_dup > dup_warning) {
1173 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1177 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1179 /* duplicates frame if needed */
1180 for (i = 0; i < nb_frames; i++) {
1181 AVFrame *in_picture;
1182 av_init_packet(&pkt);
1186 if (i < nb0_frames && ost->last_frame) {
1187 in_picture = ost->last_frame;
1189 in_picture = next_picture;
1194 in_picture->pts = ost->sync_opts;
1197 if (!check_recording_time(ost))
1199 if (ost->frame_number >= ost->max_frames)
1203 #if FF_API_LAVF_FMT_RAWPICTURE
1204 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1205 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1206 /* raw pictures are written as AVPicture structure to
1207 avoid any copies. We support temporarily the older
1209 if (in_picture->interlaced_frame)
1210 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1212 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1213 pkt.data = (uint8_t *)in_picture;
1214 pkt.size = sizeof(AVPicture);
1215 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1216 pkt.flags |= AV_PKT_FLAG_KEY;
1218 output_packet(of, &pkt, ost);
1222 int forced_keyframe = 0;
1225 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1226 ost->top_field_first >= 0)
1227 in_picture->top_field_first = !!ost->top_field_first;
1229 if (in_picture->interlaced_frame) {
1230 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1231 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1233 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1235 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1237 in_picture->quality = enc->global_quality;
1238 in_picture->pict_type = 0;
1240 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1241 in_picture->pts * av_q2d(enc->time_base) : NAN;
1242 if (ost->forced_kf_index < ost->forced_kf_count &&
1243 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1244 ost->forced_kf_index++;
1245 forced_keyframe = 1;
1246 } else if (ost->forced_keyframes_pexpr) {
1248 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1249 res = av_expr_eval(ost->forced_keyframes_pexpr,
1250 ost->forced_keyframes_expr_const_values, NULL);
1251 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1252 ost->forced_keyframes_expr_const_values[FKF_N],
1253 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1255 ost->forced_keyframes_expr_const_values[FKF_T],
1256 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1259 forced_keyframe = 1;
1260 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1261 ost->forced_keyframes_expr_const_values[FKF_N];
1262 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1263 ost->forced_keyframes_expr_const_values[FKF_T];
1264 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1267 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1268 } else if ( ost->forced_keyframes
1269 && !strncmp(ost->forced_keyframes, "source", 6)
1270 && in_picture->key_frame==1) {
1271 forced_keyframe = 1;
1274 if (forced_keyframe) {
1275 in_picture->pict_type = AV_PICTURE_TYPE_I;
1276 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1279 update_benchmark(NULL);
1281 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1282 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1283 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1284 enc->time_base.num, enc->time_base.den);
1287 ost->frames_encoded++;
1289 ret = avcodec_send_frame(enc, in_picture);
1294 ret = avcodec_receive_packet(enc, &pkt);
1295 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296 if (ret == AVERROR(EAGAIN))
1302 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1308 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309 pkt.pts = ost->sync_opts;
1311 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1314 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1320 frame_size = pkt.size;
1321 output_packet(of, &pkt, ost);
1323 /* if two pass, output log */
1324 if (ost->logfile && enc->stats_out) {
1325 fprintf(ost->logfile, "%s", enc->stats_out);
1331 * For video, number of frames in == number of packets out.
1332 * But there may be reordering, so we can't throw away frames on encoder
1333 * flush, we need to limit them here, before they go into encoder.
1335 ost->frame_number++;
1337 if (vstats_filename && frame_size)
1338 do_video_stats(ost, frame_size);
1341 if (!ost->last_frame)
1342 ost->last_frame = av_frame_alloc();
1343 av_frame_unref(ost->last_frame);
1344 if (next_picture && ost->last_frame)
1345 av_frame_ref(ost->last_frame, next_picture);
1347 av_frame_free(&ost->last_frame);
1351 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/*
 * Convert a normalized squared-error ratio into PSNR in decibels.
 * Callers pass error / (width * height * 255.0 * 255.0), i.e. MSE already
 * divided by peak^2, so only -10*log10 remains.
 * NOTE(review): this listing has elided lines (gaps in the embedded
 * numbering); the function's braces are among the missing lines.
 */
1355 static double psnr(double d)
1357 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video output stream to the
 * global vstats file (opened lazily on first call from vstats_filename).
 * @param ost        output stream the encoded frame belongs to
 * @param frame_size size in bytes of the packet just produced
 * NOTE(review): elided lines (gaps in embedded numbering) hide the fopen
 * error handling and several closing braces.
 */
1360 static void do_video_stats(OutputStream *ost, int frame_size)
1362 AVCodecContext *enc;
1364 double ti1, bitrate, avg_bitrate;
1366 /* this is executed just the first time do_video_stats is called */
1368 vstats_file = fopen(vstats_filename, "w");
1376 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy one-file format (<=1) and the
 * newer format that also records output file and stream indices. */
1378 if (vstats_version <= 1) {
1379 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380 ost->quality / (float)FF_QP2LAMBDA)
1382 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR column only when the encoder collected error[] (AV_CODEC_FLAG_PSNR). */
1386 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1389 fprintf(vstats_file,"f_size= %6d ", frame_size);
1390 /* compute pts value */
1391 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame alone; average from total bytes
 * written so far divided by elapsed stream time ti1. */
1395 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream — and every other stream in the same output file —
 * as fully finished for both the encoder and the muxer, so no further
 * packets are produced or written for that file.
 */
1405 static void finish_output_stream(OutputStream *ost)
1407 OutputFile *of = output_files[ost->file_index];
1410 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Propagate to all sibling streams of the same output file. */
1413 for (i = 0; i < of->ctx->nb_streams; i++)
1414 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1419 * Get and encode new output from any of the filtergraphs, without causing
1422 * @return 0 for success, <0 for severe errors
/*
 * Drain every configured buffersink once (non-blocking, NO_REQUEST) and hand
 * the frames to do_video_out()/do_audio_out(). With flush set, an EOF from a
 * video sink triggers a final do_video_out(NULL) to flush the encoder.
 * NOTE(review): elided lines (numbering gaps) hide several continue/break
 * statements and closing braces of the per-stream loop.
 */
1424 static int reap_filters(int flush)
1426 AVFrame *filtered_frame = NULL;
1429 /* Reap all buffers present in the buffer sinks */
1430 for (i = 0; i < nb_output_streams; i++) {
1431 OutputStream *ost = output_streams[i];
1432 OutputFile *of = output_files[ost->file_index];
1433 AVFilterContext *filter;
1434 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1437 if (!ost->filter || !ost->filter->graph->graph)
1439 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time a frame arrives. */
1441 if (!ost->initialized) {
1442 char error[1024] = "";
1443 ret = init_output_stream(ost, error, sizeof(error));
1445 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446 ost->file_index, ost->index, error);
1451 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452 return AVERROR(ENOMEM);
1454 filtered_frame = ost->filtered_frame;
1457 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1458 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1459 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN/EOF are expected; anything else is only warned about. */
1461 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462 av_log(NULL, AV_LOG_WARNING,
1463 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464 } else if (flush && ret == AVERROR_EOF) {
1465 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1466 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1470 if (ost->finished) {
1471 av_frame_unref(filtered_frame);
/* Rescale the frame pts from the filter timebase to the encoder timebase,
 * keeping a higher-precision float copy (extra_bits of sub-tick resolution)
 * for the fps/vsync code; the small FFSIGN bias avoids exact midpoints. */
1474 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1476 AVRational filter_tb = av_buffersink_get_time_base(filter);
1477 AVRational tb = enc->time_base;
1478 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1480 tb.den <<= extra_bits;
1482 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1483 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1484 float_pts /= 1 << extra_bits;
1485 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1488 filtered_frame->pts =
1489 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1490 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1492 //if (ost->source_index >= 0)
1493 //     *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1495 switch (av_buffersink_get_type(filter)) {
1496 case AVMEDIA_TYPE_VIDEO:
/* Unless the user forced an aspect ratio, take it from the filter output. */
1497 if (!ost->frame_aspect_ratio.num)
1498 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1501 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1502 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1504 enc->time_base.num, enc->time_base.den);
1507 do_video_out(of, ost, filtered_frame, float_pts);
1509 case AVMEDIA_TYPE_AUDIO:
/* Refuse channel-count changes the encoder cannot accept mid-stream. */
1510 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1511 enc->channels != filtered_frame->channels) {
1512 av_log(NULL, AV_LOG_ERROR,
1513 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1516 do_audio_out(of, ost, filtered_frame);
1519 // TODO support subtitle filters
1523 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals with muxing overhead,
 * then (at verbose level) per-stream demux/decode and encode/mux statistics
 * for every input and output file, and a warning if nothing was encoded.
 * @param total_size total bytes written to the first output file, or <=0 if
 *                   unknown (then overhead percent is reported as "unknown")
 */
1530 static void print_final_stats(int64_t total_size)
1532 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1533 uint64_t subtitle_size = 0;
1534 uint64_t data_size = 0;
1535 float percent = -1.0;
/* Accumulate payload bytes per media type across all output streams. */
1539 for (i = 0; i < nb_output_streams; i++) {
1540 OutputStream *ost = output_streams[i];
1541 switch (ost->enc_ctx->codec_type) {
1542 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1544 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1545 default: other_size += ost->data_size; break;
1547 extra_size += ost->enc_ctx->extradata_size;
1548 data_size += ost->data_size;
/* Pass-1-only encodes produce no real output worth counting. */
1549 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1550 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw payload, as a percent. */
1554 if (data_size && total_size>0 && total_size >= data_size)
1555 percent = 100.0 * (total_size - data_size) / data_size;
1557 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1558 video_size  / 1024.0,
1559 audio_size  / 1024.0,
1560 subtitle_size / 1024.0,
1561 other_size  / 1024.0,
1562 extra_size  / 1024.0);
1564 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1566 av_log(NULL, AV_LOG_INFO, "unknown");
1567 av_log(NULL, AV_LOG_INFO, "\n");
1569 /* print verbose per-stream stats */
1570 for (i = 0; i < nb_input_files; i++) {
1571 InputFile *f = input_files[i];
/* Shadows the parameter intentionally: per-file totals only. */
1572 uint64_t total_packets = 0, total_size = 0;
1574 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1575 i, f->ctx->filename);
1577 for (j = 0; j < f->nb_streams; j++) {
1578 InputStream *ist = input_streams[f->ist_index + j];
1579 enum AVMediaType type = ist->dec_ctx->codec_type;
1581 total_size    += ist->data_size;
1582 total_packets += ist->nb_packets;
1584 av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1585 i, j, media_type_string(type));
1586 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1587 ist->nb_packets, ist->data_size);
1589 if (ist->decoding_needed) {
1590 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1591 ist->frames_decoded);
1592 if (type == AVMEDIA_TYPE_AUDIO)
1593 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1594 av_log(NULL, AV_LOG_VERBOSE, "; ");
1597 av_log(NULL, AV_LOG_VERBOSE, "\n");
1600 av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1601 total_packets, total_size);
/* Symmetric report for output files: frames encoded and packets muxed. */
1604 for (i = 0; i < nb_output_files; i++) {
1605 OutputFile *of = output_files[i];
1606 uint64_t total_packets = 0, total_size = 0;
1608 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1609 i, of->ctx->filename);
1611 for (j = 0; j < of->ctx->nb_streams; j++) {
1612 OutputStream *ost = output_streams[of->ost_index + j];
1613 enum AVMediaType type = ost->enc_ctx->codec_type;
1615 total_size    += ost->data_size;
1616 total_packets += ost->packets_written;
1618 av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1619 i, j, media_type_string(type));
1620 if (ost->encoding_needed) {
1621 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1622 ost->frames_encoded);
1623 if (type == AVMEDIA_TYPE_AUDIO)
1624 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1625 av_log(NULL, AV_LOG_VERBOSE, "; ");
1628 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1629 ost->packets_written, ost->data_size);
1631 av_log(NULL, AV_LOG_VERBOSE, "\n");
1634 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1635 total_packets, total_size);
/* Warn (with a hint about -ss/-t/-frames) when nothing at all was written. */
1637 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1638 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1640 av_log(NULL, AV_LOG_WARNING, "\n");
1642 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (or final) progress line — frame count, fps, q, size,
 * time, bitrate, dup/drop, speed — to stderr and, in key=value form, to the
 * -progress AVIO stream. Rate-limited to one update per 500ms unless this is
 * the last report. On the last report, also prints the final stats summary.
 * @param is_last_report non-zero for the final report at end of run
 * @param timer_start    wall-clock start time (us) used for fps/speed
 * @param cur_time       current wall-clock time (us)
 * NOTE(review): elided lines (numbering gaps) hide early returns, several
 * variable declarations (buf, fps, q, t, qp, j, ret, speed, ...) and braces.
 */
1647 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1650 AVBPrint buf_script;
1652 AVFormatContext *oc;
1654 AVCodecContext *enc;
1655 int frame_number, vid, i;
1658 int64_t pts = INT64_MIN + 1;
/* Static state: remembers the previous report time for rate limiting and
 * accumulates a QP histogram across calls (qp_hist option). */
1659 static int64_t last_time = -1;
1660 static int qp_histogram[52];
1661 int hours, mins, secs, us;
1665 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle: emit at most one report per 500ms of wall time. */
1668 if (!is_last_report) {
1669 if (last_time == -1) {
1670 last_time = cur_time;
1673 if ((cur_time - last_time) < 500000)
1675 last_time = cur_time;
1678 t = (cur_time-timer_start) / 1000000.0;
1681 oc = output_files[0]->ctx;
/* avio_size() fails on non-seekable outputs; fall back to current offset. */
1683 total_size = avio_size(oc->pb);
1684 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1685 total_size = avio_tell(oc->pb);
1689 av_bprint_init(&buf_script, 0, 1);
1690 for (i = 0; i < nb_output_streams; i++) {
1692 ost = output_streams[i];
1694 if (!ost->stream_copy)
1695 q = ost->quality / (float) FF_QP2LAMBDA;
/* Only the FIRST video stream (vid flag) drives frame/fps/q columns;
 * later video streams just append their own q value. */
1697 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1698 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1699 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700 ost->file_index, ost->index, q);
1702 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1705 frame_number = ost->frame_number;
1706 fps = t > 1 ? frame_number / t : 0;
1707 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1708 frame_number, fps < 9.95, fps, q);
1709 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1710 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1711 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1712 ost->file_index, ost->index, q);
1714 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* QP histogram: one hex digit per bucket, log2-compressed counts. */
1718 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1720 for (j = 0; j < 32; j++)
1721 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* Per-plane PSNR: final report uses the encoder's lifetime error sums,
 * intermediate reports use the last frame's error from ost->error. */
1724 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1726 double error, error_sum = 0;
1727 double scale, scale_sum = 0;
1729 char type[3] = { 'Y','U','V' };
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1731 for (j = 0; j < 3; j++) {
1732 if (is_last_report) {
1733 error = enc->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1736 error = ost->error[j];
1737 scale = enc->width * enc->height * 255.0 * 255.0;
1743 p = psnr(error / scale);
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1745 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1746 ost->file_index, ost->index, type[j] | 32, p);
1748 p = psnr(error_sum / scale_sum);
1749 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1750 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1751 ost->file_index, ost->index, p);
1755 /* compute min output value */
/* pts = maximum end timestamp over all streams, in AV_TIME_BASE units. */
1756 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1757 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1758 ost->st->time_base, AV_TIME_BASE_Q));
1760 nb_frames_drop += ost->last_dropped;
1763 secs = FFABS(pts) / AV_TIME_BASE;
1764 us = FFABS(pts) % AV_TIME_BASE;
1770 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1771 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1773 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1776 "size=%8.0fkB time=", total_size / 1024.0);
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1780 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1781 (100 * us) / AV_TIME_BASE);
1784 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1785 av_bprintf(&buf_script, "bitrate=N/A\n");
1787 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1788 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1791 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1792 else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1793 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1794 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1795 hours, mins, secs, us);
1797 if (nb_frames_dup || nb_frames_drop)
1798 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1799 nb_frames_dup, nb_frames_drop);
1800 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1801 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1804 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1805 av_bprintf(&buf_script, "speed=N/A\n");
1807 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1808 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps updating the same console line; '\n' only for the last one. */
1811 if (print_stats || is_last_report) {
1812 const char end = is_last_report ? '\n' : '\r';
1813 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1814 fprintf(stderr, "%s    %c", buf, end);
1816 av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
/* Machine-readable progress output for -progress consumers. */
1821 if (progress_avio) {
1822 av_bprintf(&buf_script, "progress=%s\n",
1823 is_last_report ? "end" : "continue");
1824 avio_write(progress_avio, buf_script.str,
1825 FFMIN(buf_script.len, buf_script.size - 1));
1826 avio_flush(progress_avio);
1827 av_bprint_finalize(&buf_script, NULL);
1828 if (is_last_report) {
1829 if ((ret = avio_closep(&progress_avio)) < 0)
1830 av_log(NULL, AV_LOG_ERROR,
1831 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1836 print_final_stats(total_size);
/*
 * Drain every encoder at end of input: for each output stream that encodes,
 * send a NULL frame and pull out all delayed packets until AVERROR_EOF,
 * muxing each one. Streams never initialized (no input data) are configured
 * with fake input parameters first so headers can still be written.
 * NOTE(review): elided lines (numbering gaps) hide `continue`s, exit paths
 * and braces; the packet/stop-encoding locals are also not visible here.
 */
1839 static void flush_encoders(void)
1843 for (i = 0; i < nb_output_streams; i++) {
1844 OutputStream *ost = output_streams[i];
1845 AVCodecContext *enc = ost->enc_ctx;
1846 OutputFile *of = output_files[ost->file_index];
1848 if (!ost->encoding_needed)
1851 // Try to enable encoding with no input frames.
1852 // Maybe we should just let encoding fail instead.
1853 if (!ost->initialized) {
1854 FilterGraph *fg = ost->filter->graph;
1855 char error[1024] = "";
1857 av_log(NULL, AV_LOG_WARNING,
1858 "Finishing stream %d:%d without any data written to it.\n",
1859 ost->file_index, ost->st->index);
1861 if (ost->filter && !fg->graph) {
1863 for (x = 0; x < fg->nb_inputs; x++) {
1864 InputFilter *ifilter = fg->inputs[x];
1865 if (ifilter->format < 0) {
1866 AVCodecParameters *par = ifilter->ist->st->codecpar;
1867 // We never got any input. Set a fake format, which will
1868 // come from libavformat.
1869 ifilter->format = par->format;
1870 ifilter->sample_rate = par->sample_rate;
1871 ifilter->channels = par->channels;
1872 ifilter->channel_layout = par->channel_layout;
1873 ifilter->width = par->width;
1874 ifilter->height = par->height;
1875 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/* If even the fake formats are incomplete, give up on this stream. */
1879 if (!ifilter_has_all_input_formats(fg))
1882 ret = configure_filtergraph(fg);
1884 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1888 finish_output_stream(ost);
1891 ret = init_output_stream(ost, error, sizeof(error));
1893 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1894 ost->file_index, ost->index, error);
/* Raw/PCM-style audio encoders (frame_size <= 1) have no delay to flush. */
1899 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1901 #if FF_API_LAVF_FMT_RAWPICTURE
1902 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1906 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1910 const char *desc = NULL;
1914 switch (enc->codec_type) {
1915 case AVMEDIA_TYPE_AUDIO:
1918 case AVMEDIA_TYPE_VIDEO:
1925 av_init_packet(&pkt);
1929 update_benchmark(NULL);
/* Standard drain loop: receive until EAGAIN, then send the NULL frame
 * exactly once; EOF from receive ends the flush for this stream. */
1931 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1932 ret = avcodec_send_frame(enc, NULL);
1934 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1941 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1942 if (ret < 0 && ret != AVERROR_EOF) {
1943 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass logging: append the encoder's stats for pass 1. */
1948 if (ost->logfile && enc->stats_out) {
1949 fprintf(ost->logfile, "%s", enc->stats_out);
1951 if (ret == AVERROR_EOF) {
1954 if (ost->finished & MUXER_FINISHED) {
1955 av_packet_unref(&pkt);
1958 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959 pkt_size = pkt.size;
1960 output_packet(of, &pkt, ost);
1961 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1962 do_video_stats(ost, pkt_size);
1969 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns non-zero when ost is fed by this exact input stream and the
 * packet is not before the output file's start time; the elided lines
 * (numbering gaps) contain the return statements.
 */
1971 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1973 OutputFile *of = output_files[ost->file_index];
1974 int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1976 if (ost->source_index != ist_index)
/* Drop packets before -ss on the output side. */
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * filter out pre-start and post-recording-time packets, rescale pts/dts/
 * duration into the muxer timebase, optionally run the bitstream parser
 * for formats that need it, then hand the packet to output_packet().
 * NOTE(review): elided lines (numbering gaps) hide the opkt/pict locals,
 * early returns and several else branches.
 */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 OutputFile *of = output_files[ost->file_index];
1991 InputFile   *f = input_files [ist->file_index];
1992 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997 av_init_packet(&opkt);
/* Wait for a keyframe to start the copy unless the user overrode it. */
1999 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000 !ost->copy_initial_nonkeyframes)
/* Drop packets entirely before the effective start point (-ss, copy_ts). */
2003 if (!ost->frame_number && !ost->copy_prior_start) {
2004 int64_t comp_start = start_time;
2005 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007 if (pkt->pts == AV_NOPTS_VALUE ?
2008 ist->pts < comp_start :
2009 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Output-side -t: close the stream once recording_time is reached. */
2013 if (of->recording_time != INT64_MAX &&
2014 ist->pts >= of->recording_time + start_time) {
2015 close_output_stream(ost);
/* Input-side -t on this input file, measured from its start time. */
2019 if (f->recording_time != INT64_MAX) {
2020 start_time = f->ctx->start_time;
2021 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022 start_time += f->start_time;
2023 if (ist->pts >= f->recording_time + start_time) {
2024 close_output_stream(ost);
2029 /* force the input stream PTS */
2030 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale timestamps from input stream timebase to muxer timebase,
 * shifted so the output starts at zero. */
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 opkt.pts = AV_NOPTS_VALUE;
2038 if (pkt->dts == AV_NOPTS_VALUE)
2039 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042 opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps via av_rescale_delta so rounding
 * error does not accumulate across packets. */
2044 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2047 duration = ist->dec_ctx->frame_size;
2048 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2049 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2050 ost->mux_timebase) - ost_tb_start_time;
2053 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 opkt.flags    = pkt->flags;
2056 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057 if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063 &opkt.data, &opkt.size,
2064 pkt->data, pkt->size,
2065 pkt->flags & AV_PKT_FLAG_KEY);
2067 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser may have allocated new data; wrap it so the packet owns it. */
2072 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086 /* store AVPicture in AVPacket, as expected by the output format */
2087 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2093 opkt.data = (uint8_t *)&pict;
2094 opkt.size = sizeof(AVPicture);
2095 opkt.flags |= AV_PKT_FLAG_KEY;
2099 output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (capped by -guess_layout_max) and warn the user.
 * The elided lines (numbering gaps) contain the success/failure returns;
 * presumably non-zero on success — TODO confirm against callers.
 */
2102 int guess_input_channel_layout(InputStream *ist)
2104 AVCodecContext *dec = ist->dec_ctx;
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
/* Respect the user-set upper bound on how many channels to guess for. */
2109 if (dec->channels > ist->guess_layout_max)
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Bookkeep decode success/failure counts and, with -xerror, abort on a
 * decode error or on a corrupt decoded frame.
 * @param ist        input stream (may be consulted for the decoded frame)
 * @param got_output set if the decoder produced a frame
 * @param ret        decoder return code (<0 counts as an error)
 */
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2127 if (ret < 0 && exit_on_error)
/* Even a successfully returned frame may be flagged corrupt by the decoder. */
2130 if (exit_on_error && *got_output && ist) {
2131 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
/* Returns 0 if any audio/video input of the graph still has an unknown
 * format (format < 0); the elided lines contain the return statements. */
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push a decoded frame into one filtergraph input. Detects parameter
 * changes (format, hw frames ctx, rate/channels/layout for audio,
 * dimensions for video) and reconfigures the graph when needed; frames
 * arriving before all inputs have known formats are cloned and queued.
 * @return 0 on success, negative AVERROR otherwise.
 */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
/* Hardware frames context change also forces a reinit. */
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
2164 ifilter->channels       != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width  != frame->width ||
2169 ifilter->height != frame->height;
/* Record the frame's parameters as this input's current parameters. */
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
2182 if (!ifilter_has_all_input_formats(fg)) {
/* Not all inputs known yet: park a clone of the frame in this input's
 * FIFO (grown on demand) and come back once formats are complete. */
2183 AVFrame *tmp = av_frame_clone(frame);
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2191 av_frame_free(&tmp);
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Flush pending output of the old graph before reconfiguring. */
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2209 ret = configure_filtergraph(fg);
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218 if (ret != AVERROR_EOF)
2219 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream on one filtergraph input. If the graph is already
 * configured, push a NULL frame into the buffersrc; otherwise, once every
 * input of the never-configured graph has hit EOF, mark all of the graph's
 * output streams finished.
 */
2226 static int ifilter_send_eof(InputFilter *ifilter)
2232 if (ifilter->filter) {
2233 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2237 // the filtergraph was never configured
2238 FilterGraph *fg = ifilter->graph;
/* Scan stops early at the first input not yet at EOF. */
2239 for (i = 0; i < fg->nb_inputs; i++)
2240 if (!fg->inputs[i]->eof)
2242 if (i == fg->nb_inputs) {
2243 // All the input streams have finished without the filtergraph
2244 // ever being configured.
2245 // Mark the output streams as finished.
2246 for (j = 0; j < fg->nb_outputs; j++)
2247 finish_output_stream(fg->outputs[j]->ost);
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2257 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin adapter mapping the send/receive decode API onto the old got_frame
 * convention; the elided lines set *got_frame and return ret. */
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
/* pkt==NULL means "just pull more output"; only send when a packet exists. */
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Feed one decoded frame to every filtergraph input fed by this input
 * stream. All but the last filter receive a ref-counted copy
 * (ist->filter_frame); the last one consumes decoded_frame directly.
 * AVERROR_EOF from a filter is treated as benign (flushed graph).
 */
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the frame's pts (preferring the frame's
 * own pts, then the packet pts, then the stream dts converted from
 * AV_TIME_BASE), advance next_pts/next_dts by the decoded duration, and
 * forward the frame to the filtergraphs.
 * NOTE(review): elided lines (numbering gaps) hide error-path gotos, the
 * got_output early-out's brace structure, and some declarations.
 */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames for this stream. */
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder claiming success with an invalid sample rate is rejected. */
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2342 /* increment next_dts to use for the case where the input stream does not
2343 have timestamps or there are multiple frames in the packet */
2344 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2346 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Choose the timestamp source and its timebase, in priority order. */
2350 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351 decoded_frame_tb   = ist->st->time_base;
2352 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353 decoded_frame->pts = pkt->pts;
2354 decoded_frame_tb   = ist->st->time_base;
2356 decoded_frame->pts = ist->dts;
2357 decoded_frame_tb   = AV_TIME_BASE_Q;
/* av_rescale_delta keeps sample-accurate timestamps across rescaling. */
2359 if (decoded_frame->pts != AV_NOPTS_VALUE)
2360 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362 (AVRational){1, avctx->sample_rate});
2363 ist->nb_samples = decoded_frame->nb_samples;
2364 err = send_frame_to_filters(ist, decoded_frame);
2366 av_frame_unref(ist->filter_frame);
2367 av_frame_unref(decoded_frame);
2368 return err < 0 ? err : ret;
/*
 * Decode one video packet (pkt == NULL drains the decoder at EOF), pick a
 * best-effort timestamp for the produced frame and forward it to the input
 * stream's filtergraphs via send_frame_to_filters().
 * Sets *got_output when a frame came out; returns err if err < 0, else the
 * decoder's return value.
 * NOTE(review): this listing is line-sampled; some braces/statements (e.g.
 * the avpkt declaration, gotos, closing braces) are not visible here.
 */
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2374     AVFrame *decoded_frame;
2375     int i, ret = 0, err = 0;
2376     int64_t best_effort_timestamp;
2377     int64_t dts = AV_NOPTS_VALUE;
2380     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2383     if (!eof && pkt && pkt->size == 0)
// Lazily allocate the reusable frames owned by the InputStream.
2386     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387         return AVERROR(ENOMEM);
2388     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389         return AVERROR(ENOMEM);
2390     decoded_frame = ist->decoded_frame;
// Convert ist->dts (AV_TIME_BASE units) back into the stream time base.
2391     if (ist->dts != AV_NOPTS_VALUE)
2392         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2395         avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2398         // The old code used to set dts on the drain packet, which does not work
2399         // with the new API anymore.
// Remember the dts so it can serve as a timestamp fallback while draining.
2401         void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2403             return AVERROR(ENOMEM);
2404         ist->dts_buffer = new;
2405         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2408     update_benchmark(NULL);
2409     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2414     // The following line may be required in some cases where there is no parser
2415     // or the parser does not has_b_frames correctly
2416     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2420             av_log(ist->dec_ctx, AV_LOG_WARNING,
2421                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2422                    "If you want to help, upload a sample "
2423                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425                    ist->dec_ctx->has_b_frames,
2426                    ist->st->codecpar->video_delay);
2429     if (ret != AVERROR_EOF)
2430         check_decode_result(ist, got_output, ret);
// Debug-log frame/context geometry mismatches; presumably non-fatal here.
2432     if (*got_output && ret >= 0) {
2433         if (ist->dec_ctx->width != decoded_frame->width ||
2434             ist->dec_ctx->height != decoded_frame->height ||
2435             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437                 decoded_frame->width,
2438                 decoded_frame->height,
2439                 decoded_frame->format,
2440                 ist->dec_ctx->width,
2441                 ist->dec_ctx->height,
2442                 ist->dec_ctx->pix_fmt);
2446     if (!*got_output || ret < 0)
// User-forced field order (-top) overrides the decoded value.
2449     if(ist->top_field_first>=0)
2450         decoded_frame->top_field_first = ist->top_field_first;
2452     ist->frames_decoded++;
// Download hw-decoded surfaces to system memory before filtering.
2454     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2459     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2461     best_effort_timestamp= decoded_frame->best_effort_timestamp;
// With a forced input framerate (-r before -i), generate CFR timestamps.
2463     if (ist->framerate.num)
2464         best_effort_timestamp = ist->cfr_next_pts++;
// While draining, fall back to the oldest buffered packet dts (shift the queue).
2466     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2467         best_effort_timestamp = ist->dts_buffer[0];
2469         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2470             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2471         ist->nb_dts_buffer--;
// Publish the chosen timestamp on the frame and, rescaled, on ist->pts/next_pts.
2474     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2475         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2477         if (ts != AV_NOPTS_VALUE)
2478             ist->next_pts = ist->pts = ts;
2482         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2483                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2484                ist->st->index, av_ts2str(decoded_frame->pts),
2485                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2486                best_effort_timestamp,
2487                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2488                decoded_frame->key_frame, decoded_frame->pict_type,
2489                ist->st->time_base.num, ist->st->time_base.den);
// Container-level SAR wins over the codec-level value.
2492     if (ist->st->sample_aspect_ratio.num)
2493         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2495     err = send_frame_to_filters(ist, decoded_frame);
2498     av_frame_unref(ist->filter_frame);
2499     av_frame_unref(decoded_frame);
2500     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and route the result: optionally fix the
 * previous subtitle's duration (-fix_sub_duration), feed sub2video when a
 * filtergraph consumes it, and send it to every subtitle encoder that maps
 * this input. Sets *got_output; returns the decoder result.
 * NOTE(review): line-sampled listing; some statements/braces are missing.
 */
2503 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2506     AVSubtitle subtitle;
2508     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2509                                           &subtitle, got_output, pkt);
2511     check_decode_result(NULL, got_output, ret);
// On error or no output: at EOF, flush any queued sub2video state.
2513     if (ret < 0 || !*got_output) {
2516             sub2video_flush(ist);
// -fix_sub_duration: delay output by one subtitle so the previous one's
// end_display_time can be clamped to the next subtitle's start (in ms).
2520     if (ist->fix_sub_duration) {
2522         if (ist->prev_sub.got_output) {
2523             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2524                              1000, AV_TIME_BASE);
2525             if (end < ist->prev_sub.subtitle.end_display_time) {
2526                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2527                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2528                        ist->prev_sub.subtitle.end_display_time, end,
2529                        end <= 0 ? ", dropping it" : "");
2530                 ist->prev_sub.subtitle.end_display_time = end;
// Swap current decode state with the buffered previous one (one-sub delay).
2533         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2534         FFSWAP(int,        ret,         ist->prev_sub.ret);
2535         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
// Render to the sub2video frame if ready; otherwise queue until the
// filtergraph is configured (FIFO grows by doubling when full).
2543     if (ist->sub2video.frame) {
2544         sub2video_update(ist, &subtitle);
2545     } else if (ist->nb_filters) {
2546         if (!ist->sub2video.sub_queue)
2547             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2548         if (!ist->sub2video.sub_queue)
2550         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2551             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2555         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2559     if (!subtitle.num_rects)
2562     ist->frames_decoded++;
// Encode the subtitle on every matching subtitle output stream.
2564     for (i = 0; i < nb_output_streams; i++) {
2565         OutputStream *ost = output_streams[i];
2567         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2568             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2571         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2576     avsubtitle_free(&subtitle);
/*
 * Signal EOF on every filtergraph input attached to this input stream.
 * NOTE(review): the error-propagation/return lines are not visible in this
 * line-sampled listing; presumably a failed ifilter_send_eof() aborts.
 */
2580 static int send_filter_eof(InputStream *ist)
2583     for (i = 0; i < ist->nb_filters; i++) {
2584         ret = ifilter_send_eof(ist->filters[i]);
2591 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher for one input stream: maintains the stream's
 * dts/pts estimate, decodes (audio/video/subtitle) when decoding_needed,
 * otherwise advances timestamps for stream copy, and finally stream-copies
 * the packet to matching outputs. Returns !eof_reached.
 * NOTE(review): line-sampled listing; declarations of avpkt/ret/got_output/
 * repeating and several braces are not visible here.
 */
2592 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2596     int eof_reached = 0;
// First packet: seed ist->dts with the decoder delay (-has_b_frames frames
// worth of time) when an average frame rate is known.
2599     if (!ist->saw_first_ts) {
2600         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2602         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2603             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2604             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2606         ist->saw_first_ts = 1;
2609     if (ist->next_dts == AV_NOPTS_VALUE)
2610         ist->next_dts = ist->dts;
2611     if (ist->next_pts == AV_NOPTS_VALUE)
2612         ist->next_pts = ist->pts;
2616     av_init_packet(&avpkt);
// Resync the internal clock to the packet dts (AV_TIME_BASE units).
2623     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2624         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2625         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2626             ist->next_pts = ist->pts = ist->dts;
2629     // while we have more to decode or while the decoder did output something on EOF
2630     while (ist->decoding_needed) {
2631         int64_t duration = 0;
2633         int decode_failed = 0;
2635         ist->pts = ist->next_pts;
2636         ist->dts = ist->next_dts;
2638         switch (ist->dec_ctx->codec_type) {
2639         case AVMEDIA_TYPE_AUDIO:
2640             ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2643         case AVMEDIA_TYPE_VIDEO:
2644             ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
// Estimate the frame duration: packet duration first, then the codec
// framerate adjusted by the parser's repeat_pict/ticks_per_frame.
2646             if (!repeating || !pkt || got_output) {
2647                 if (pkt && pkt->duration) {
2648                     duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2650                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651                     duration = ((int64_t)AV_TIME_BASE *
2652                                     ist->dec_ctx->framerate.den * ticks) /
2653                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2656             if(ist->dts != AV_NOPTS_VALUE && duration) {
2657                 ist->next_dts += duration;
2659                 ist->next_dts = AV_NOPTS_VALUE;
2663                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2665         case AVMEDIA_TYPE_SUBTITLE:
2668             ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2669             if (!pkt && ret >= 0)
2676         if (ret == AVERROR_EOF) {
// Decode failure vs. post-decode processing failure log differently;
// only the latter (or exit_on_error) is treated as fatal below.
2682             if (decode_failed) {
2683                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2684                        ist->file_index, ist->st->index, av_err2str(ret));
2686                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2687                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2689             if (!decode_failed || exit_on_error)
2695             ist->got_output = 1;
2700         // During draining, we might get multiple output frames in this loop.
2701         // ffmpeg.c does not drain the filter chain on configuration changes,
2702         // which means if we send multiple frames at once to the filters, and
2703         // one of those frames changes configuration, the buffered frames will
2704         // be lost. This can upset certain FATE tests.
2705         // Decode only 1 frame per call on EOF to appease these FATE tests.
2706         // The ideal solution would be to rewrite decoding to use the new
2707         // decoding API in a better way.
2714     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2715     /* except when looping we need to flush but not to send an EOF */
2716     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2717         int ret = send_filter_eof(ist);
2719             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2724     /* handle stream copy */
2725     if (!ist->decoding_needed) {
2726         ist->dts = ist->next_dts;
2727         switch (ist->dec_ctx->codec_type) {
2728         case AVMEDIA_TYPE_AUDIO:
2729             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2730                              ist->dec_ctx->sample_rate;
2732         case AVMEDIA_TYPE_VIDEO:
2733             if (ist->framerate.num) {
2734                 // TODO: Remove work-around for c99-to-c89 issue 7
2735                 AVRational time_base_q = AV_TIME_BASE_Q;
2736                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2737                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2738             } else if (pkt->duration) {
2739                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740             } else if(ist->dec_ctx->framerate.num != 0) {
2741                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2742                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2743                                   ist->dec_ctx->framerate.den * ticks) /
2744                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2748         ist->pts = ist->dts;
2749         ist->next_pts = ist->next_dts;
// Copy the packet verbatim to every non-encoding output mapped to ist.
2751     for (i = 0; pkt && i < nb_output_streams; i++) {
2752         OutputStream *ost = output_streams[i];
2754         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2757         do_streamcopy(ist, ost, pkt);
2760     return !eof_reached;
/*
 * Build and emit an SDP description covering all "rtp" output muxers.
 * Waits until every output file's header has been written; prints to stdout
 * unless -sdp_file was given, in which case it writes (and then frees) the
 * sdp_filename target. NOTE(review): line-sampled; sdp buffer declaration,
 * NULL checks and some braces are not visible here.
 */
2763 static void print_sdp(void)
2768     AVIOContext *sdp_pb;
2769     AVFormatContext **avc;
// Bail out until all output headers are written (SDP needs final params).
2771     for (i = 0; i < nb_output_files; i++) {
2772         if (!output_files[i]->header_written)
2776     avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only RTP outputs; j counts how many were found.
2779     for (i = 0, j = 0; i < nb_output_files; i++) {
2780         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2781             avc[j] = output_files[i]->ctx;
2789     av_sdp_create(avc, j, sdp, sizeof(sdp));
2791     if (!sdp_filename) {
2792         printf("SDP:\n%s\n", sdp);
2795         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2796             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2798             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2799             avio_closep(&sdp_pb);
2800             av_freep(&sdp_filename);
/*
 * Look up the HWAccel table entry matching a pixel format.
 * Scans the NULL-name-terminated hwaccels[] table; the not-found return
 * (presumably NULL) is outside this line-sampled view.
 */
2808 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2811     for (i = 0; hwaccels[i].name; i++)
2812         if (hwaccels[i].pix_fmt == pix_fmt)
2813             return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick/initialize a matching hardware acceleration for the
 * InputStream (stored in s->opaque). Falls through hwaccel formats that do
 * not match the user's -hwaccel selection; a requested-but-failing hwaccel
 * is fatal (returns AV_PIX_FMT_NONE). NOTE(review): line-sampled; the
 * software-format fallback and final return are not visible here.
 */
2817 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2819     InputStream *ist = s->opaque;
2820     const enum AVPixelFormat *p;
// pix_fmts is terminated by -1 (AV_PIX_FMT_NONE).
2823     for (p = pix_fmts; *p != -1; p++) {
2824         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2825         const HWAccel *hwaccel;
// Only hardware pixel formats are candidates for hwaccel setup.
2827         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2830         hwaccel = get_hwaccel(*p);
// Skip if another hwaccel is already active or the user asked for a
// different one (HWACCEL_AUTO accepts any).
2832             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2833             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2836         ret = hwaccel->init(s);
// Init failure is fatal only when this hwaccel was explicitly requested.
2838             if (ist->hwaccel_id == hwaccel->id) {
2839                 av_log(NULL, AV_LOG_FATAL,
2840                        "%s hwaccel requested for input stream #%d:%d, "
2841                        "but cannot be initialized.\n", hwaccel->name,
2842                        ist->file_index, ist->st->index);
2843                 return AV_PIX_FMT_NONE;
2848         if (ist->hw_frames_ctx) {
2849             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2850             if (!s->hw_frames_ctx)
2851                 return AV_PIX_FMT_NONE;
2854         ist->active_hwaccel_id = hwaccel->id;
2855         ist->hwaccel_pix_fmt   = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator when the frame uses the active hwaccel pixel format, otherwise
 * fall back to the default allocator.
 */
2862 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2864     InputStream *ist = s->opaque;
2866     if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2867         return ist->hwaccel_get_buffer(s, frame, flags);
2869     return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (if decoding is needed) and reset
 * its timestamp state. On failure fills 'error' (error_len bytes) and
 * returns a negative AVERROR. NOTE(review): line-sampled; the ret
 * declaration, some braces and the final return are not visible here.
 */
2872 static int init_input_stream(int ist_index, char *error, int error_len)
2875     InputStream *ist = input_streams[ist_index];
2877     if (ist->decoding_needed) {
2878         AVCodec *codec = ist->dec;
2880             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2881                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2882             return AVERROR(EINVAL);
// Install ffmpeg.c's callbacks (hwaccel format/buffer selection above).
2885         ist->dec_ctx->opaque                = ist;
2886         ist->dec_ctx->get_format            = get_format;
2887         ist->dec_ctx->get_buffer2           = get_buffer;
2888         ist->dec_ctx->thread_safe_callbacks = 1;
2890         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2891         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2892            (ist->decoding_needed & DECODING_FOR_OST)) {
2893             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2894             if (ist->decoding_needed & DECODING_FOR_FILTER)
2895                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2898         av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2900         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2901          * audio, and video decoders such as cuvid or mediacodec */
2902         av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
// Default to automatic threading unless the user set "threads".
2904         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2905             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2906         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2907             if (ret == AVERROR_EXPERIMENTAL)
2908                 abort_codec_experimental(codec, 0);
2910             snprintf(error, error_len,
2911                      "Error while opening decoder for input stream "
2913                     ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder options not consumed by avcodec_open2 are an error.
2916         assert_avoptions(ist->decoder_opts);
2919     ist->next_pts = AV_NOPTS_VALUE;
2920     ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, or (presumably) NULL
 * when the output has no direct source (e.g. filter-complex output) — the
 * NULL return line is outside this line-sampled view.
 */
2925 static InputStream *get_input_stream(OutputStream *ost)
2927     if (ost->source_index >= 0)
2928         return input_streams[ost->source_index];
/* qsort comparator for int64_t values (overflow-safe via FFDIFFSIGN). */
2932 static int compare_int64(const void *a, const void *b)
2934     return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2937 /* open the muxer when all the streams are initialized */
/*
 * If every stream of this output file is initialized, write the container
 * header, optionally emit the SDP, and flush packets that queued up in the
 * per-stream muxing FIFOs before the muxer was ready.
 * NOTE(review): line-sampled; ret/i declarations, early returns and the
 * print_sdp() call presumably gated by line 2963 are not visible here.
 */
2938 static int check_init_output_file(OutputFile *of, int file_index)
// Do nothing until ALL streams of this file are initialized.
2942     for (i = 0; i < of->ctx->nb_streams; i++) {
2943         OutputStream *ost = output_streams[of->ost_index + i];
2944         if (!ost->initialized)
2948     of->ctx->interrupt_callback = int_cb;
2950     ret = avformat_write_header(of->ctx, &of->opts);
2952         av_log(NULL, AV_LOG_ERROR,
2953                "Could not write header for output file #%d "
2954                "(incorrect codec parameters ?): %s\n",
2955                file_index, av_err2str(ret));
2958     //assert_avoptions(of->opts);
2959     of->header_written = 1;
2961     av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2963     if (sdp_filename || want_sdp)
2966     /* flush the muxing queues */
2967     for (i = 0; i < of->ctx->nb_streams; i++) {
2968         OutputStream *ost = output_streams[of->ost_index + i];
2970         /* try to improve muxing time_base (only possible if nothing has been written yet) */
2971         if (!av_fifo_size(ost->muxing_queue))
2972             ost->mux_timebase = ost->st->time_base;
2974         while (av_fifo_size(ost->muxing_queue)) {
2976             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2977             write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through each filter, then copy
 * the final filter's output parameters/time base back onto the stream.
 * Returns 0 when there are no filters; negative AVERROR on failure.
 * NOTE(review): line-sampled; ctx/ret/i declarations and error returns are
 * not visible here.
 */
2984 static int init_output_bsfs(OutputStream *ost)
2989     if (!ost->nb_bitstream_filters)
2992     for (i = 0; i < ost->nb_bitstream_filters; i++) {
2993         ctx = ost->bsf_ctx[i];
// Filter i takes its input from filter i-1's output (or the stream itself).
2995         ret = avcodec_parameters_copy(ctx->par_in,
2996                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3000         ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3002         ret = av_bsf_init(ctx);
3004             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3005                    ost->bsf_ctx[i]->filter->name);
// The last filter's output parameters become the stream's parameters.
3010     ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3011     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3015     ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in stream-copy mode: copy codec parameters from
 * the source input stream (with codec_tag remapping for the target muxer),
 * transfer timing info, duplicate stream side data, and apply per-codec-type
 * fixups (block_align reset for MP3/AC3, SAR override for video).
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): line-sampled; ret/i declarations, several error checks and
 * the final return are not visible here.
 */
3020 static int init_output_stream_streamcopy(OutputStream *ost)
3022     OutputFile *of = output_files[ost->file_index];
3023     InputStream *ist = get_input_stream(ost);
3024     AVCodecParameters *par_dst = ost->st->codecpar;
3025     AVCodecParameters *par_src = ost->ref_par;
3028     uint32_t codec_tag = par_dst->codec_tag;
// Stream copy requires a direct input source and no filtergraph.
3030     av_assert0(ist && !ost->filter);
3032     ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3034     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3036         av_log(NULL, AV_LOG_FATAL,
3037                "Error setting up codec context options.\n");
3040     avcodec_parameters_from_context(par_src, ost->enc_ctx);
// Keep the source codec_tag only if the output container maps it to the
// same codec id, or has no alternative tag for this codec.
3043         unsigned int codec_tag_tmp;
3044         if (!of->ctx->oformat->codec_tag ||
3045             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3046             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3047             codec_tag = par_src->codec_tag;
3050     ret = avcodec_parameters_copy(par_dst, par_src);
3054     par_dst->codec_tag = codec_tag;
3056     if (!ost->frame_rate.num)
3057         ost->frame_rate = ist->framerate;
3058     ost->st->avg_frame_rate = ost->frame_rate;
3060     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3064     // copy timebase while removing common factors
3065     if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3066         ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3068     // copy estimated duration as a hint to the muxer
3069     if (ost->st->duration <= 0 && ist->st->duration > 0)
3070         ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3073     ost->st->disposition = ist->st->disposition;
// Duplicate all stream-level side data packets onto the output stream.
3075     if (ist->st->nb_side_data) {
3076         for (i = 0; i < ist->st->nb_side_data; i++) {
3077             const AVPacketSideData *sd_src = &ist->st->side_data[i];
3080             dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3082                 return AVERROR(ENOMEM);
3083             memcpy(dst_data, sd_src->data, sd_src->size);
// A user-forced rotation replaces/overrides the display matrix side data.
3087     if (ost->rotate_overridden) {
3088         uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3089                                               sizeof(int32_t) * 9);
3091             av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3094     ost->parser = av_parser_init(par_dst->codec_id);
3095     ost->parser_avctx = avcodec_alloc_context3(NULL);
3096     if (!ost->parser_avctx)
3097         return AVERROR(ENOMEM);
3099     switch (par_dst->codec_type) {
3100     case AVMEDIA_TYPE_AUDIO:
3101         if (audio_volume != 256) {
3102             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// MP3/AC3: clear bogus block_align values so the muxer recomputes framing.
3105         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3106             par_dst->block_align= 0;
3107         if(par_dst->codec_id == AV_CODEC_ID_AC3)
3108             par_dst->block_align= 0;
3110     case AVMEDIA_TYPE_VIDEO:
3111         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3113                 av_mul_q(ost->frame_aspect_ratio,
3114                          (AVRational){ par_dst->height, par_dst->width });
3115             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3116                    "with stream copy may produce invalid files\n");
3118         else if (ist->st->sample_aspect_ratio.num)
3119             sar = ist->st->sample_aspect_ratio;
3121             sar = par_src->sample_aspect_ratio;
3122         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3123         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3124         ost->st->r_frame_rate = ist->st->r_frame_rate;
3128     ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to "Lavc<version>
 * <codec name>" (or just "Lavc <codec name>" in bitexact mode). A
 * pre-existing user-supplied tag is left untouched.
 */
3133 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3135     AVDictionaryEntry *e;
3137     uint8_t *encoder_string;
3138     int encoder_string_len;
3139     int format_flags = 0;
3140     int codec_flags = 0;
// Respect an explicit -metadata encoder=... set by the user.
3142     if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
// Evaluate -fflags / codec "flags" options to detect bitexact mode, which
// must suppress the version string for reproducible output.
3145     e = av_dict_get(of->opts, "fflags", NULL, 0);
3147         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3150         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3152     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3154         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3157         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
// +2: one space separator, one NUL terminator.
3160     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3161     encoder_string     = av_mallocz(encoder_string_len);
3162     if (!encoder_string)
3165     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3166         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3168         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3169     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3170     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3171                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse a -force_key_frames spec (comma-separated times; a "chapters[delta]"
 * entry expands to every chapter start plus delta) into a sorted int64
 * array of timestamps in avctx->time_base, stored on ost->forced_kf_pts /
 * forced_kf_count. Fatal on allocation failure.
 * NOTE(review): line-sampled; the comma-counting loop body, p advancement
 * and some braces are not visible here.
 */
3174 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3175                                     AVCodecContext *avctx)
3178     int n = 1, i, size, index = 0;
// First pass: count entries (commas + 1) to size the array.
3181     for (p = kf; *p; p++)
3185     pts = av_malloc_array(size, sizeof(*pts));
3187         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3192     for (i = 0; i < n; i++) {
3193         char *next = strchr(p, ',');
// "chapters[+delta]": grow the array and add every chapter start + delta.
3198         if (!memcmp(p, "chapters", 8)) {
3200             AVFormatContext *avf = output_files[ost->file_index]->ctx;
3203             if (avf->nb_chapters > INT_MAX - size ||
3204                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3206                 av_log(NULL, AV_LOG_FATAL,
3207                        "Could not allocate forced key frames array.\n");
3210             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3211             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3213             for (j = 0; j < avf->nb_chapters; j++) {
3214                 AVChapter *c = avf->chapters[j];
3215                 av_assert1(index < size);
3216                 pts[index++] = av_rescale_q(c->start, c->time_base,
3217                                             avctx->time_base) + t;
// Plain time entry: parse and rescale into the encoder time base.
3222             t = parse_time_or_die("force_key_frames", p, 1);
3223             av_assert1(index < size);
3224             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3231     av_assert0(index == size);
// Sorted order lets the encoder-side lookup scan forward monotonically.
3232     qsort(pts, size, sizeof(*pts), compare_int64);
3233     ost->forced_kf_count = size;
3234     ost->forced_kf_pts   = pts;
/*
 * Configure the encoder context of an output stream before avcodec_open2:
 * pull format/rate/layout/geometry from the buffersink feeding it, choose a
 * frame rate (filter -> -r -> input r_frame_rate -> 25fps fallback), set
 * the encoder time base, and parse -force_key_frames. Per-type branches
 * handle audio, video, subtitle and data streams.
 * NOTE(review): line-sampled; j/ret declarations, some braces and the final
 * return are not visible here.
 */
3237 static int init_output_stream_encode(OutputStream *ost)
3239     InputStream *ist = get_input_stream(ost);
3240     AVCodecContext *enc_ctx = ost->enc_ctx;
3241     AVCodecContext *dec_ctx = NULL;
3242     AVFormatContext *oc = output_files[ost->file_index]->ctx;
3245     set_encoder_id(output_files[ost->file_index], ost);
3247     // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3248     // hand, the legacy API makes demuxers set "rotate" metadata entries,
3249     // which have to be filtered out to prevent leaking them to output files.
3250     av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3253         ost->st->disposition = ist->st->disposition;
3255         dec_ctx = ist->dec_ctx;
3257         enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the file, mark the first
// audio/video stream as the default track.
3259         for (j = 0; j < oc->nb_streams; j++) {
3260             AVStream *st = oc->streams[j];
3261             if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3264         if (j == oc->nb_streams)
3265             if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3266                 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3267                 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame-rate selection cascade: filtergraph, -r, input framerate,
// input r_frame_rate, then a warned 25fps fallback.
3270     if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3271         if (!ost->frame_rate.num)
3272             ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3273         if (ist && !ost->frame_rate.num)
3274             ost->frame_rate = ist->framerate;
3275         if (ist && !ost->frame_rate.num)
3276             ost->frame_rate = ist->st->r_frame_rate;
3277         if (ist && !ost->frame_rate.num) {
3278             ost->frame_rate = (AVRational){25, 1};
3279             av_log(NULL, AV_LOG_WARNING,
3281                    "about the input framerate is available. Falling "
3282                    "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3283                    "if you want a different framerate.\n",
3284                    ost->file_index, ost->index);
3286 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
// Snap to the nearest framerate the encoder supports (unless -force_fps).
3287         if (ost->enc->supported_framerates && !ost->force_fps) {
3288             int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3289             ost->frame_rate = ost->enc->supported_framerates[idx];
3291         // reduce frame rate for mpeg4 to be within the spec limits
3292         if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3293             av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3294                       ost->frame_rate.num, ost->frame_rate.den, 65535);
3298     switch (enc_ctx->codec_type) {
3299     case AVMEDIA_TYPE_AUDIO:
// Audio parameters come straight from the buffersink feeding the encoder.
3300         enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
3302             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3303                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3304         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
3305         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3306         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
3307         enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3309     case AVMEDIA_TYPE_VIDEO:
3310         enc_ctx->time_base = av_inv_q(ost->frame_rate);
3311         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3312             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3313         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3314            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3315             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3316                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Forced keyframe times were parsed in AV_TIME_BASE; move them to the
// encoder time base now that it is known.
3318         for (j = 0; j < ost->forced_kf_count; j++)
3319             ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3321                                                  enc_ctx->time_base);
3323         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
3324         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3325         enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3326             ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3327             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3328             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
// Warn about non-yuv420p defaults for encoders with poor player support.
3329         if (!strncmp(ost->enc->name, "libx264", 7) &&
3330             enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3331             av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3332             av_log(NULL, AV_LOG_WARNING,
3333                    "No pixel format specified, %s for H.264 encoding chosen.\n"
3334                    "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3335                    av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3336         if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3337             enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3338             av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3339             av_log(NULL, AV_LOG_WARNING,
3340                    "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3341                    "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3342                    av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3343         enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3345             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3346                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3348         enc_ctx->framerate = ost->frame_rate;
3350         ost->st->avg_frame_rate = ost->frame_rate;
// Raw-sample precision from the decoder only survives a lossless-ish path;
// otherwise fall back to the user-specified frame_bits_per_raw_sample.
3353             enc_ctx->width  != dec_ctx->width  ||
3354             enc_ctx->height != dec_ctx->height ||
3355             enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3356             enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3359         if (ost->forced_keyframes) {
3360             if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3361                 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3362                                     forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3364                     av_log(NULL, AV_LOG_ERROR,
3365                            "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3368                 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3369                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3370                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3371                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3373             // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3374             // parse it only for static kf timings
3375             } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3376                 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3380     case AVMEDIA_TYPE_SUBTITLE:
3381         enc_ctx->time_base = AV_TIME_BASE_Q;
// Subtitle canvas defaults to the source stream's dimensions.
3382         if (!enc_ctx->width) {
3383             enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
3384             enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
3387     case AVMEDIA_TYPE_DATA:
3394     ost->mux_timebase = enc_ctx->time_base;
3399 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3403 if (ost->encoding_needed) {
3404 AVCodec *codec = ost->enc;
3405 AVCodecContext *dec = NULL;
3408 ret = init_output_stream_encode(ost);
3412 if ((ist = get_input_stream(ost)))
3414 if (dec && dec->subtitle_header) {
3415 /* ASS code assumes this buffer is null terminated so add extra byte. */
3416 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3417 if (!ost->enc_ctx->subtitle_header)
3418 return AVERROR(ENOMEM);
3419 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3420 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3422 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3423 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3424 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3426 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3427 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3428 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3430 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3431 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3432 av_buffersink_get_format(ost->filter->filter)) {
3433 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3434 if (!ost->enc_ctx->hw_frames_ctx)
3435 return AVERROR(ENOMEM);
3438 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3439 if (ret == AVERROR_EXPERIMENTAL)
3440 abort_codec_experimental(codec, 1);
3441 snprintf(error, error_len,
3442 "Error while opening encoder for output stream #%d:%d - "
3443 "maybe incorrect parameters such as bit_rate, rate, width or height",
3444 ost->file_index, ost->index);
3447 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3448 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3449 av_buffersink_set_frame_size(ost->filter->filter,
3450 ost->enc_ctx->frame_size);
3451 assert_avoptions(ost->encoder_opts);
3452 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3453 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3454 " It takes bits/s as argument, not kbits/s\n");
3456 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3458 av_log(NULL, AV_LOG_FATAL,
3459 "Error initializing the output stream codec context.\n");
3463 * FIXME: ost->st->codec should't be needed here anymore.
3465 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3469 if (ost->enc_ctx->nb_coded_side_data) {
3472 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3473 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3476 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3478 return AVERROR(ENOMEM);
3479 memcpy(dst_data, sd_src->data, sd_src->size);
3484 * Add global input side data. For now this is naive, and copies it
3485 * from the input stream's global side data. All side data should
3486 * really be funneled over AVFrame and libavfilter, then added back to
3487 * packet side data, and then potentially using the first packet for
3492 for (i = 0; i < ist->st->nb_side_data; i++) {
3493 AVPacketSideData *sd = &ist->st->side_data[i];
3494 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3496 return AVERROR(ENOMEM);
3497 memcpy(dst, sd->data, sd->size);
3498 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3499 av_display_rotation_set((uint32_t *)dst, 0);
3503 // copy timebase while removing common factors
3504 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3505 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3507 // copy estimated duration as a hint to the muxer
3508 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3509 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3511 ost->st->codec->codec= ost->enc_ctx->codec;
3512 } else if (ost->stream_copy) {
3513 ret = init_output_stream_streamcopy(ost);
3518 * FIXME: will the codec context used by the parser during streamcopy
3519 * This should go away with the new parser API.
3521 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3526 // parse user provided disposition, and update stream values
3527 if (ost->disposition) {
3528 static const AVOption opts[] = {
3529 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3530 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3531 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3532 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3533 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3534 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3535 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3536 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3537 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3538 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3539 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3540 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3541 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3542 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3545 static const AVClass class = {
3547 .item_name = av_default_item_name,
3549 .version = LIBAVUTIL_VERSION_INT,
3551 const AVClass *pclass = &class;
3553 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3558 /* initialize bitstream filters for the output stream
3559 * needs to be done here, because the codec id for streamcopy is not
3560 * known until now */
3561 ret = init_output_bsfs(ost);
3565 ost->initialized = 1;
3567 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, at most once per stream index, when a packet arrives for a stream
 * that appeared dynamically after the initial probe of the input file.
 * nb_streams_warn records the highest index already warned about.
 * NOTE(review): some source lines are elided in this excerpt. */
3574 static void report_new_stream(int input_index, AVPacket *pkt)
3576     InputFile *file = input_files[input_index];
3577     AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream — the early-return line is elided here */
3579     if (pkt->stream_index < file->nb_streams_warn)
3581     av_log(file->ctx, AV_LOG_WARNING,
3582            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3583            av_get_media_type_string(st->codecpar->codec_type),
3584            input_index, pkt->stream_index,
3585            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* raise the watermark so the next packet of this stream stays silent */
3586     file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to a source stream, init framerate emulation, open decoders and
 * encoders, discard unused programs, write headers for stream-less outputs,
 * and print the "Stream mapping:" report. Returns 0 or a negative AVERROR.
 * NOTE(review): error-path lines (dump_format goto etc.) are elided here. */
3589 static int transcode_init(void)
3591     int ret = 0, i, j, k;
3592     AVFormatContext *oc;
3595     char error[1024] = {0};
/* attach each complex-graph output to the input stream feeding its
 * (single-input) graph, so -map-less outputs still have a source_index */
3597     for (i = 0; i < nb_filtergraphs; i++) {
3598         FilterGraph *fg = filtergraphs[i];
3599         for (j = 0; j < fg->nb_outputs; j++) {
3600             OutputFilter *ofilter = fg->outputs[j];
3601             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3603             if (fg->nb_inputs != 1)
3605             for (k = nb_input_streams-1; k >= 0 ; k--)
3606                 if (fg->inputs[0]->ist == input_streams[k])
3608             ofilter->ost->source_index = k;
3612     /* init framerate emulation */
3613     for (i = 0; i < nb_input_files; i++) {
3614         InputFile *ifile = input_files[i];
3615         if (ifile->rate_emu)
3616             for (j = 0; j < ifile->nb_streams; j++)
/* -re: remember wall-clock start so get_input_packet() can throttle reads */
3617                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3620     /* init input streams */
3621     for (i = 0; i < nb_input_streams; i++)
3622         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close every encoder context opened so far before bailing */
3623             for (i = 0; i < nb_output_streams; i++) {
3624                 ost = output_streams[i];
3625                 avcodec_close(ost->enc_ctx);
3630     /* open each encoder */
3631     for (i = 0; i < nb_output_streams; i++) {
3632         // skip streams fed from filtergraphs until we have a frame for them
3633         if (output_streams[i]->filter)
3636         ret = init_output_stream(output_streams[i], error, sizeof(error));
3641     /* discard unused programs */
3642     for (i = 0; i < nb_input_files; i++) {
3643         InputFile *ifile = input_files[i];
3644         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3645             AVProgram *p = ifile->ctx->programs[j];
3646             int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is actually used */
3648             for (k = 0; k < p->nb_stream_indexes; k++)
3649                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3650                     discard = AVDISCARD_DEFAULT;
3653             p->discard = discard;
3657     /* write headers for files with no streams */
3658     for (i = 0; i < nb_output_files; i++) {
3659         oc = output_files[i]->ctx;
3660         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3661             ret = check_init_output_file(output_files[i], i);
3668     /* dump the stream mapping */
3669     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
/* first: inputs that feed complex (non-simple) filtergraphs */
3670     for (i = 0; i < nb_input_streams; i++) {
3671         ist = input_streams[i];
3673         for (j = 0; j < ist->nb_filters; j++) {
3674             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3675                 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3676                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3677                        ist->filters[j]->name);
3678                 if (nb_filtergraphs > 1)
3679                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3680                 av_log(NULL, AV_LOG_INFO, "\n");
/* then: each output stream — attachment, complex-graph output, or a plain
 * input->output mapping (with optional sync stream / copy / codec names) */
3685     for (i = 0; i < nb_output_streams; i++) {
3686         ost = output_streams[i];
3688         if (ost->attachment_filename) {
3689             /* an attached file */
3690             av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3691                    ost->attachment_filename, ost->file_index, ost->index);
3695         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3696             /* output from a complex graph */
3697             av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3698             if (nb_filtergraphs > 1)
3699                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3701             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3702                    ost->index, ost->enc ? ost->enc->name : "?");
3706         av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3707                input_streams[ost->source_index]->file_index,
3708                input_streams[ost->source_index]->st->index,
3711         if (ost->sync_ist != input_streams[ost->source_index])
3712             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3713                    ost->sync_ist->file_index,
3714                    ost->sync_ist->st->index);
3715         if (ost->stream_copy)
3716             av_log(NULL, AV_LOG_INFO, " (copy)");
3718             const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3719             const AVCodec *out_codec = ost->enc;
3720             const char *decoder_name = "?";
3721             const char *in_codec_name = "?";
3722             const char *encoder_name = "?";
3723             const char *out_codec_name = "?";
3724             const AVCodecDescriptor *desc;
/* "native" means the codec implementation shares its name with the codec
 * descriptor, i.e. it is ffmpeg's built-in decoder/encoder */
3727                 decoder_name = in_codec->name;
3728                 desc = avcodec_descriptor_get(in_codec->id);
3730                     in_codec_name = desc->name;
3731                 if (!strcmp(decoder_name, in_codec_name))
3732                     decoder_name = "native";
3736                 encoder_name = out_codec->name;
3737                 desc = avcodec_descriptor_get(out_codec->id);
3739                     out_codec_name = desc->name;
3740                 if (!strcmp(encoder_name, out_codec_name))
3741                     encoder_name = "native";
3744             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3745                    in_codec_name, decoder_name,
3746                    out_codec_name, encoder_name);
3748         av_log(NULL, AV_LOG_INFO, "\n");
/* error path: report the message accumulated in error[] (goto target elided) */
3752         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* visible to signal handlers / other threads: init phase is over */
3756     atomic_store(&transcode_init_done, 1);
3761 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Return 1 if there remain streams where more output is wanted, 0 otherwise.
 * A stream stops counting when it is finished, its file hit -fs
 * (limit_filesize), or it reached -frames (max_frames) — in which case all
 * sibling streams of that output file are closed too.
 * NOTE(review): some lines (continue/return) are elided in this excerpt. */
3762 static int need_output(void)
3766     for (i = 0; i < nb_output_streams; i++) {
3767         OutputStream *ost = output_streams[i];
3768         OutputFile *of = output_files[ost->file_index];
3769         AVFormatContext *os = output_files[ost->file_index]->ctx;
3771         if (ost->finished ||
3772             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3774         if (ost->frame_number >= ost->max_frames) {
/* frame limit reached: close every stream of this output file */
3776             for (j = 0; j < of->ctx->nb_streams; j++)
3777                 close_output_stream(output_streams[of->ost_index + j]);
3788 * Select the output stream to process.
3790 * @return selected output stream, or NULL if none available
/* Pick the next output stream to work on: the unfinished, initialized (or
 * inputs_done) stream with the smallest current DTS (rescaled for
 * comparison; AV_NOPTS_VALUE sorts first as INT64_MIN). Returns NULL when
 * the chosen stream is temporarily unavailable or nothing is selectable. */
3792 static OutputStream *choose_output(void)
3795     int64_t opts_min = INT64_MAX;
3796     OutputStream *ost_min = NULL;
3798     for (i = 0; i < nb_output_streams; i++) {
3799         OutputStream *ost = output_streams[i];
3800         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3801                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3803         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3804             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* skip streams that are neither initialized nor flagged as inputs_done */
3806         if (!ost->initialized && !ost->inputs_done)
3809         if (!ost->finished && opts < opts_min) {
/* remember the minimum; an unavailable winner yields NULL (try again later) */
3811             ost_min = ost->unavailable ? NULL : ost;
/* Toggle terminal echo on stdin (fd 0); used around interactive command
 * entry so typed characters are (or are not) shown. Silently does nothing
 * if stdin is not a tty (tcgetattr fails). */
3817 static void set_tty_echo(int on)
3821     if (tcgetattr(0, &tty) == 0) {
3822         if (on) tty.c_lflag |= ECHO;
3823         else tty.c_lflag &= ~ECHO;
3824         tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, never in daemon mode) and act on
 * single-key commands: q quits, +/- change log level, s toggles QP
 * histogram, c/C send or queue filtergraph commands, d/D cycle debug
 * flags, ? prints help. Returns AVERROR_EXIT to stop, 0 otherwise.
 * NOTE(review): several lines (key read, braces) are elided in this excerpt. */
3829 static int check_keyboard_interaction(int64_t cur_time)
3832     static int64_t last_time;
3833     if (received_nb_signals)
3834         return AVERROR_EXIT;
3835     /* read_key() returns 0 on EOF */
3836     if(cur_time - last_time >= 100000 && !run_as_daemon){
3838         last_time = cur_time;
3842         return AVERROR_EXIT;
3843     if (key == '+') av_log_set_level(av_log_get_level()+10);
3844     if (key == '-') av_log_set_level(av_log_get_level()-10);
3845     if (key == 's') qp_hist     ^= 1;
/* 'h' cycles hex/packet dump states (branch header elided in excerpt) */
3848             do_hex_dump = do_pkt_dump = 0;
3849         } else if(do_pkt_dump){
3853         av_log_set_level(AV_LOG_DEBUG);
3855     if (key == 'c' || key == 'C'){
3856         char buf[4096], target[64], command[256], arg[256] = {0};
3859         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read a line a key at a time with echo enabled (set_tty_echo calls elided) */
3862         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3867         fprintf(stderr, "\n");
3869             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3870             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3871                    target, time, command, arg);
3872             for (i = 0; i < nb_filtergraphs; i++) {
3873                 FilterGraph *fg = filtergraphs[i];
/* time < 0: send immediately ('c' stops at first filter that handles it);
 * time >= 0 with 'C': queue the command for later execution */
3876                     ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3877                                                       key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3878                     fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3879                 } else if (key == 'c') {
3880                     fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3881                     ret = AVERROR_PATCHWELCOME;
3883                     ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3885                         fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3890             av_log(NULL, AV_LOG_ERROR,
3891                    "Parse error, at least 3 arguments were expected, "
3892                    "only %d given in string '%s'\n", n, buf);
3895     if (key == 'd' || key == 'D'){
/* 'd': shift to the next debug mode, skipping modes that would crash */
3898             debug = input_streams[0]->st->codec->debug<<1;
3899             if(!debug) debug = 1;
3900             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
/* 'D': prompt for an explicit numeric debug value */
3907             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3912             fprintf(stderr, "\n");
3913             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3914                 fprintf(stderr,"error parsing debug value\n");
/* apply the chosen debug flags to all decoder and encoder contexts */
3916         for(i=0;i<nb_input_streams;i++) {
3917             input_streams[i]->st->codec->debug = debug;
3919         for(i=0;i<nb_output_streams;i++) {
3920             OutputStream *ost = output_streams[i];
3921             ost->enc_ctx->debug = debug;
3923         if(debug) av_log_set_level(AV_LOG_DEBUG);
3924         fprintf(stderr,"debug=%d\n", debug);
3927         fprintf(stderr, "key    function\n"
3928                         "?      show this help\n"
3929                         "+      increase verbosity\n"
3930                         "-      decrease verbosity\n"
3931                         "c      Send command to first matching filter supporting it\n"
3932                         "C      Send/Queue command to all matching filters\n"
3933                         "D      cycle through available debug modes\n"
3934                         "h      dump packets/hex press to cycle through the 3 states\n"
3936                         "s      Show QP histogram\n"
/* Per-input-file reader thread: pulls packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * Non-blocking queues retry once in blocking mode and warn that
 * -thread_queue_size may be too small. On read error/EOF the error is
 * stored on the queue's receive side so the main thread sees it.
 * NOTE(review): loop header and some lines are elided in this excerpt. */
3943 static void *input_thread(void *arg)
3946     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3951         ret = av_read_frame(f->ctx, &pkt);
3953         if (ret == AVERROR(EAGAIN)) {
/* read failed: propagate the error (incl. AVERROR_EOF) to the receiver */
3958             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3961         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3962         if (flags && ret == AVERROR(EAGAIN)) {
/* queue full in non-blocking mode: block this once, then warn the user */
3964             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3965             av_log(f->ctx, AV_LOG_WARNING,
3966                    "Thread message queue blocking; consider raising the "
3967                    "thread_queue_size option (current value: %d)\n",
3968                    f->thread_queue_size);
3971             if (ret != AVERROR_EOF)
3972                 av_log(f->ctx, AV_LOG_ERROR,
3973                        "Unable to send packet to main thread: %s\n",
/* send failed: the packet was not consumed, so free it here */
3975             av_packet_unref(&pkt);
3976             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down all input reader threads: signal EOF on the send side, drain
 * and unref any queued packets, join each thread, then free its queue. */
3988 static void free_input_threads(void)
3988     for (i = 0; i < nb_input_files; i++) {
3989         InputFile *f = input_files[i];
/* files without a queue never had a thread (single input, or init failed) */
3992         if (!f || !f->in_thread_queue)
3994         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3995         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3996             av_packet_unref(&pkt);
3998         pthread_join(f->thread, NULL);
4000         av_thread_message_queue_free(&f->in_thread_queue);
/* Start one reader thread per input file (skipped entirely for a single
 * input). Non-seekable, non-lavfi inputs get a non-blocking queue so a
 * slow consumer cannot stall a live source. Returns 0 or negative AVERROR. */
4004 static int init_input_threads(void)
4008     if (nb_input_files == 1)
4011     for (i = 0; i < nb_input_files; i++) {
4012         InputFile *f = input_files[i];
4014         if (f->ctx->pb ? !f->ctx->pb->seekable :
4015             strcmp(f->ctx->iformat->name, "lavfi"))
4016             f->non_blocking = 1;
4017         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4018                                             f->thread_queue_size, sizeof(AVPacket));
4022         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns a positive errno, not a negative AVERROR */
4023             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4024             av_thread_message_queue_free(&f->in_thread_queue);
4025             return AVERROR(ret);
/* Receive one packet from the input file's reader-thread queue; uses a
 * non-blocking receive when the file is marked non_blocking (condition
 * line elided in this excerpt). */
4031 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4033     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4035                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file. With -re (rate emulation),
 * return EAGAIN while any stream of the file is ahead of wall-clock time.
 * Multiple inputs read via the per-file thread queue; a single input reads
 * directly with av_read_frame(). */
4039 static int get_input_packet(InputFile *f, AVPacket *pkt)
4043         for (i = 0; i < f->nb_streams; i++) {
4044             InputStream *ist = input_streams[f->ist_index + i];
/* compare stream DTS (in microseconds) against elapsed wall-clock time */
4045             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4046             int64_t now = av_gettime_relative() - ist->start;
4048                 return AVERROR(EAGAIN);
4053     if (nb_input_files > 1)
4054         return get_input_packet_mt(f, pkt);
4056     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step hit EAGAIN somewhere. */
4059 static int got_eagain(void)
4062     for (i = 0; i < nb_output_streams; i++)
4063         if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN state: per-input-file eagain flags and
 * per-output-stream unavailable flags, so the next loop iteration retries
 * every file and stream. */
4068 static void reset_eagain(void)
4071     for (i = 0; i < nb_input_files; i++)
4072         input_files[i]->eagain = 0;
4073     for (i = 0; i < nb_output_streams; i++)
4074         output_streams[i]->unavailable = 0;
4077 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts(); keep whichever is larger, returning the time base that
 * now applies to *duration. NOTE(review): the assignment branches are
 * elided in this excerpt. */
4078 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4079                                AVRational time_base)
4085         return tmp_time_base;
4088     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4091         return tmp_time_base;
/* -stream_loop support: seek the input back to its start, flush decoders,
 * and accumulate the file's total duration (max_pts - min_pts plus the
 * last frame's length) into ifile->duration so looped timestamps can be
 * offset. Audio duration (from nb_samples) takes precedence over video
 * frame-rate estimates. NOTE(review): some lines are elided in this excerpt. */
4097 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4100     AVCodecContext *avctx;
4101     int i, ret, has_audio = 0;
4102     int64_t duration = 0;
4104     ret = av_seek_frame(is, -1, is->start_time, 0);
4108     for (i = 0; i < ifile->nb_streams; i++) {
4109         ist = input_streams[ifile->ist_index + i];
4110         avctx = ist->dec_ctx;
4113         if (ist->decoding_needed) {
/* flush: send a NULL packet then reset the decoder's internal buffers */
4114             process_input_packet(ist, NULL, 1);
4115             avcodec_flush_buffers(avctx);
4118         /* duration is the length of the last frame in a stream
4119          * when audio stream is present we don't care about
4120          * last video frame length because it's not defined exactly */
4121         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4125     for (i = 0; i < ifile->nb_streams; i++) {
4126         ist = input_streams[ifile->ist_index + i];
4127         avctx = ist->dec_ctx;
4130             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4131                 AVRational sample_rate = {1, avctx->sample_rate};
4133                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* no audio: estimate one frame's duration from -r, avg_frame_rate, or 1 tick */
4137             if (ist->framerate.num) {
4138                 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4139             } else if (ist->st->avg_frame_rate.num) {
4140                 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4141             } else duration = 1;
4143         if (!ifile->duration)
4144             ifile->time_base = ist->st->time_base;
4145         /* the total duration of the stream, max_pts - min_pts is
4146          * the duration of the stream without the last frame */
4147         duration += ist->max_pts - ist->min_pts;
4148         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: decrement (decrement line elided in this excerpt) */
4152     if (ifile->loop > 0)
4160 * - 0 -- one packet was read and processed
4161 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4162 * this function should be called again
4163 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file.
 * Handles: EAGAIN bookkeeping, -stream_loop rewind, EOF (flush decoders,
 * finish non-lavfi outputs), dynamically appearing streams, timestamp wrap
 * correction, first-packet side-data injection, ts_offset/ts_scale
 * application, discontinuity detection/repair, loop-duration offsets, and
 * finally hands the packet to process_input_packet().
 * Returns 0, AVERROR(EAGAIN) (retry), or AVERROR_EOF (stop calling).
 * NOTE(review): many lines (braces, continue/return) are elided in this
 * excerpt; comments below describe only what the visible lines show. */
4165 static int process_input(int file_index)
4167     InputFile *ifile = input_files[file_index];
4168     AVFormatContext *is;
4176     ret = get_input_packet(ifile, &pkt);
4178     if (ret == AVERROR(EAGAIN)) {
/* read failed but looping requested: seek back to start and retry once */
4182     if (ret < 0 && ifile->loop) {
4183         if ((ret = seek_to_start(ifile, is)) < 0)
4185             ret = get_input_packet(ifile, &pkt);
4186         if (ret == AVERROR(EAGAIN)) {
4192         if (ret != AVERROR_EOF) {
4193             print_error(is->filename, ret);
/* EOF: flush every decoder of this file, finish direct-mapped outputs */
4198         for (i = 0; i < ifile->nb_streams; i++) {
4199             ist = input_streams[ifile->ist_index + i];
4200             if (ist->decoding_needed) {
4201                 ret = process_input_packet(ist, NULL, 0);
4206             /* mark all outputs that don't go through lavfi as finished */
4207             for (j = 0; j < nb_output_streams; j++) {
4208                 OutputStream *ost = output_streams[j];
4210                 if (ost->source_index == ifile->ist_index + i &&
4211                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4212                     finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller re-enters and notices eof_reached */
4216         ifile->eof_reached = 1;
4217         return AVERROR(EAGAIN);
4223         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4224                          is->streams[pkt.stream_index]);
4226     /* the following test is needed in case new streams appear
4227        dynamically in stream : we ignore them */
4228     if (pkt.stream_index >= ifile->nb_streams) {
4229         report_new_stream(file_index, &pkt);
4230         goto discard_packet;
4233     ist = input_streams[ifile->ist_index + pkt.stream_index];
4235     ist->data_size += pkt.size;
4239         goto discard_packet;
4241     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4242         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* -debug_ts style trace of raw demuxed timestamps */
4247         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4248                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4249                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4250                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4251                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4252                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4253                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4254                av_ts2str(input_files[ist->file_index]->ts_offset),
4255                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap handling for streams with < 64 pts_wrap_bits */
4258     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits<64){
4259         int64_t stime, stime2;
4260         // Correcting starttime based on the enabled streams
4261         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4262         //       so we instead do it here as part of discontinuity handling
4263         if (   ist->next_dts == AV_NOPTS_VALUE
4264             && ifile->ts_offset == -is->start_time
4265             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4266             int64_t new_start_time = INT64_MAX;
4267             for (i=0; i<is->nb_streams; i++) {
4268                 AVStream *st = is->streams[i];
4269                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4271                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4273             if (new_start_time > is->start_time) {
4274                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4275                 ifile->ts_offset = -new_start_time;
4279         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4280         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4281         ist->wrap_correction_done = 1;
/* if a timestamp lies more than half the wrap period past start, unwrap it
 * and keep correction pending for the other timestamp */
4283         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4284             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4285             ist->wrap_correction_done = 0;
4287         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4288             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4289             ist->wrap_correction_done = 0;
4293     /* add the stream-global side data to the first packet */
4294     if (ist->nb_packets == 1) {
4295         for (i = 0; i < ist->st->nb_side_data; i++) {
4296             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled via autorotate elsewhere; skip duplicates too */
4299             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4302             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4305             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4309             memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the file-level timestamp offset, then the per-stream -itsscale */
4313     if (pkt.dts != AV_NOPTS_VALUE)
4314         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4315     if (pkt.pts != AV_NOPTS_VALUE)
4316         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4318     if (pkt.pts != AV_NOPTS_VALUE)
4319         pkt.pts *= ist->ts_scale;
4320     if (pkt.dts != AV_NOPTS_VALUE)
4321         pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen timestamp */
4323     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4324     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4325          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4326         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4327         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4328         int64_t delta   = pkt_dts - ifile->last_ts;
4329         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4330             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4331             ifile->ts_offset -= delta;
4332             av_log(NULL, AV_LOG_DEBUG,
4333                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4334                    delta, ifile->ts_offset);
4335             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4336             if (pkt.pts != AV_NOPTS_VALUE)
4337                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated loop duration and
 * track the stream's pts extremes for the next loop iteration */
4341     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4342     if (pkt.pts != AV_NOPTS_VALUE) {
4343         pkt.pts += duration;
4344         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4345         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4348     if (pkt.dts != AV_NOPTS_VALUE)
4349         pkt.dts += duration;
/* intra-stream discontinuity check against the predicted next_dts */
4351     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4352     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4353          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4354         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4356         int64_t delta   = pkt_dts - ist->next_dts;
4357         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4358             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4359                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4360                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4361                 ifile->ts_offset -= delta;
4362                 av_log(NULL, AV_LOG_DEBUG,
4363                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4364                        delta, ifile->ts_offset);
4365                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4366                 if (pkt.pts != AV_NOPTS_VALUE)
4367                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly wrong timestamps instead of re-offsetting */
4370             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4371                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4372                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4373                 pkt.dts = AV_NOPTS_VALUE;
4375             if (pkt.pts != AV_NOPTS_VALUE){
4376                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4377                 delta   = pkt_pts - ist->next_dts;
4378                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4379                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4380                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4381                     pkt.pts = AV_NOPTS_VALUE;
4387     if (pkt.dts != AV_NOPTS_VALUE)
4388         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4391         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4392                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4393                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4394                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4395                av_ts2str(input_files[ist->file_index]->ts_offset),
4396                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video streams alive even while no subtitle packets arrive */
4399     sub2video_heartbeat(ist, pkt.pts);
4401     process_input_packet(ist, &pkt, 0);
4404     av_packet_unref(&pkt);
4410 * Perform a step of transcoding for the specified filter graph.
4412 * @param[in] graph filter graph to consider
4413 * @param[out] best_ist input stream where a frame would allow to continue
4414 * @return 0 for success, <0 for error
/* Perform a step of transcoding for the specified filter graph.
 * Asks the graph for its oldest pending frame; on success reaps filtered
 * frames, on EOF reaps and closes all graph outputs, and on EAGAIN selects
 * *best_ist = the input whose buffersrc has the most failed requests (i.e.
 * the input most starved for data). If no input can supply data, all graph
 * outputs are marked unavailable.
 * NOTE(review): some lines are elided in this excerpt. */
4416 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4419     int nb_requests, nb_requests_max = 0;
4420     InputFilter *ifilter;
4424     ret = avfilter_graph_request_oldest(graph->graph);
4426         return reap_filters(0);
4428     if (ret == AVERROR_EOF) {
4429         ret = reap_filters(1);
4430         for (i = 0; i < graph->nb_outputs; i++)
4431             close_output_stream(graph->outputs[i]->ost);
4434     if (ret != AVERROR(EAGAIN))
4437     for (i = 0; i < graph->nb_inputs; i++) {
4438         ifilter = graph->inputs[i];
/* skip inputs whose file is stalled (eagain) or exhausted (eof) */
4440         if (input_files[ist->file_index]->eagain ||
4441             input_files[ist->file_index]->eof_reached)
4443         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4444         if (nb_requests > nb_requests_max) {
4445             nb_requests_max = nb_requests;
/* nothing can feed the graph right now: park all its outputs */
4451         for (i = 0; i < graph->nb_outputs; i++)
4452             graph->outputs[i]->ost->unavailable = 1;
4458 * Run a single step of transcoding.
4460 * @return 0 for success, <0 for error
/* Run a single step of transcoding: pick an output stream, (re)configure
 * its filtergraph when all input formats are known, decide which input
 * stream to read from (via the graph, or directly for mapped streams),
 * pull one packet, then reap any filtered frames.
 * Returns 0 for success, <0 for error.
 * NOTE(review): several lines are elided in this excerpt. */
4462 static int transcode_step(void)
4465     InputStream  *ist = NULL;
4468     ost = choose_output();
4475             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* graph exists but is not yet configured: configure once all inputs know
 * their format, otherwise fall through and keep feeding inputs */
4479     if (ost->filter && !ost->filter->graph->graph) {
4480         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4481             ret = configure_filtergraph(ost->filter->graph);
4483                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4489     if (ost->filter && ost->filter->graph->graph) {
4490         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4494     } else if (ost->filter) {
/* unconfigured graph: find an input that still owes us its first frame */
4496         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4497             InputFilter *ifilter = ost->filter->graph->inputs[i];
4498             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all inputs done but graph never configured: mark and move on */
4504             ost->inputs_done = 1;
/* directly mapped (no filtergraph): read from the source stream */
4508         av_assert0(ost->source_index >= 0);
4509         ist = input_streams[ost->source_index];
4512     ret = process_input(ist->file_index);
4513     if (ret == AVERROR(EAGAIN)) {
4514         if (input_files[ist->file_index]->eagain)
4515             ost->unavailable = 1;
4520         return ret == AVERROR_EOF ? 0 : ret;
4522     return reap_filters(0);
4526 * The following code is the main loop of the file converter
/* The main loop of the file converter: transcode_init(), then repeatedly
 * poll the keyboard, check whether any output still wants data, and run
 * transcode_step() until SIGTERM/EOF; afterwards flush decoders, write
 * trailers, close encoders/decoders, and free per-stream resources.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): error-path and cleanup-label lines are elided here. */
4528 static int transcode(void)
4531     AVFormatContext *os;
4534     int64_t timer_start;
4535     int64_t total_packets_written = 0;
4537     ret = transcode_init();
4541     if (stdin_interaction) {
4542         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4545     timer_start = av_gettime_relative();
4548     if ((ret = init_input_threads()) < 0)
4552     while (!received_sigterm) {
4553         int64_t cur_time= av_gettime_relative();
4555         /* if 'q' pressed, exits */
4556         if (stdin_interaction)
4557             if (check_keyboard_interaction(cur_time) < 0)
4560         /* check if there's any stream where output is still needed */
4561         if (!need_output()) {
4562             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4566         ret = transcode_step();
4567         if (ret < 0 && ret != AVERROR_EOF) {
4569             av_strerror(ret, errbuf, sizeof(errbuf));
4571             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4575         /* dump report by using the output first video and audio streams */
4576         print_report(0, timer_start, cur_time);
4579     free_input_threads();
4582     /* at the end of stream, we must flush the decoder buffers */
4583     for (i = 0; i < nb_input_streams; i++) {
4584         ist = input_streams[i];
4585         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4586             process_input_packet(ist, NULL, 0);
4593     /* write the trailer if needed and close file */
4594     for (i = 0; i < nb_output_files; i++) {
4595         os = output_files[i]->ctx;
4596         if (!output_files[i]->header_written) {
4597             av_log(NULL, AV_LOG_ERROR,
4598                    "Nothing was written into output file %d (%s), because "
4599                    "at least one of its streams received no packets.\n",
4603         if ((ret = av_write_trailer(os)) < 0) {
4604             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4610     /* dump report by using the first video and audio streams */
4611     print_report(1, timer_start, av_gettime_relative());
4613     /* close each encoder */
4614     for (i = 0; i < nb_output_streams; i++) {
4615         ost = output_streams[i];
4616         if (ost->encoding_needed) {
4617             av_freep(&ost->enc_ctx->stats_in);
4619         total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was produced at all */
4622     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4623         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4627     /* close each decoder */
4628     for (i = 0; i < nb_input_streams; i++) {
4629         ist = input_streams[i];
4630         if (ist->decoding_needed) {
4631             avcodec_close(ist->dec_ctx);
4632             if (ist->hwaccel_uninit)
4633                 ist->hwaccel_uninit(ist->dec_ctx);
4637     av_buffer_unref(&hw_device_ctx);
/* cleanup label (elided): also reached on the error paths above */
4644     free_input_threads();
4647     if (output_streams) {
4648         for (i = 0; i < nb_output_streams; i++) {
4649             ost = output_streams[i];
/* flush/close the 2-pass logfile; losing it would break a second pass */
4652                 if (fclose(ost->logfile))
4653                     av_log(NULL, AV_LOG_ERROR,
4654                            "Error closing logfile, loss of information possible: %s\n",
4655                            av_err2str(AVERROR(errno)));
4656                 ost->logfile = NULL;
4658             av_freep(&ost->forced_kf_pts);
4659             av_freep(&ost->apad);
4660             av_freep(&ost->disposition);
4661             av_dict_free(&ost->encoder_opts);
4662             av_dict_free(&ost->sws_dict);
4663             av_dict_free(&ost->swr_opts);
4664             av_dict_free(&ost->resample_opts);
/* Return the process's user CPU time in microseconds, using getrusage on
 * POSIX, GetProcessTimes on Windows, or wall-clock time as a last resort. */
4672 static int64_t getutime(void)
4675     struct rusage rusage;
4677     getrusage(RUSAGE_SELF, &rusage);
4678     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4679 #elif HAVE_GETPROCESSTIMES
4681     FILETIME c, e, k, u;
4682     proc = GetCurrentProcess();
4683     GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; /10 converts to microseconds */
4684     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4686     return av_gettime_relative();
/**
 * Return the peak memory usage of this process, in bytes, for the
 * final -benchmark report.
 *
 * @return peak resident set size (POSIX) or peak pagefile usage
 *         (Windows); -1 when no platform API is available.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes; convert to bytes.  Widen
     * before multiplying to avoid overflow on 32-bit long. */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    /* No way to query memory usage on this platform. */
    return -1;
#endif
}
4708 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4712 int main(int argc, char **argv)
4719 register_exit(ffmpeg_cleanup);
4721 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4723 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4724 parse_loglevel(argc, argv, options);
4726 if(argc>1 && !strcmp(argv[1], "-d")){
4728 av_log_set_callback(log_callback_null);
4733 avcodec_register_all();
4735 avdevice_register_all();
4737 avfilter_register_all();
4739 avformat_network_init();
4741 show_banner(argc, argv, options);
4743 /* parse options and open all input/output files */
4744 ret = ffmpeg_parse_options(argc, argv);
4748 if (nb_output_files <= 0 && nb_input_files == 0) {
4750 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4754 /* file converter / grab */
4755 if (nb_output_files <= 0) {
4756 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4760 // if (nb_input_files == 0) {
4761 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4765 for (i = 0; i < nb_output_files; i++) {
4766 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4770 current_time = ti = getutime();
4771 if (transcode() < 0)
4773 ti = getutime() - ti;
4775 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4777 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4778 decode_error_stat[0], decode_error_stat[1]);
4779 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4782 exit_program(received_nb_signals ? 255 : main_return_code);
4783 return main_return_code;