2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily. */
115 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions (list elided here). */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime counters/flags shared across the transcode loop. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
/* Threshold at which "More than N frames duplicated" is warned (grows as it fires). */
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
/* decode_error_stat[0]: decoded ok, [1]: decode errors (indexing per usage elsewhere — elided here). */
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
/* Timestamp of last benchmark sample; see update_benchmark(). */
139 static int current_time;
/* Destination for -progress reports (NULL when disabled). */
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated in do_subtitle_out(). */
142 static uint8_t *subtitle_out;
/* Global tables of all input/output streams and files, owned by this file
 * and freed in ffmpeg_cleanup(). */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
/* Saved terminal state so term_exit_sigsafe() can restore it; restore_tty
 * records whether oldtty is valid. */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare the stream's reusable sub2video canvas frame: size it from the
 * decoder dimensions (falling back to the configured sub2video w/h),
 * allocate its buffer and clear it. Returns <0 on allocation failure.
 * NOTE(review): interior lines are elided in this excerpt. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
/* Prefer the decoder's coded size; a zero dimension means it is not yet
 * known, so fall back to the pre-configured sub2video canvas size. */
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
/* RGB32 so the alpha channel can carry subtitle transparency. */
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the first plane to fully transparent (all-zero) pixels. */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas.
 * dst/dst_linesize describe the canvas, w/h its dimensions; rectangles
 * that are not bitmaps or that overflow the canvas are skipped with a
 * warning. NOTE(review): parameter list and some lines elided here. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles that would write outside the canvas bounds. */
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
206 dst += r->y * dst_linesize + r->x * 4;
/* r->data[1] holds the palette: one 32-bit color per palette index. */
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* Expand each palette index in the source row to a full RGB32 pixel. */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Push the current sub2video canvas frame (with the given pts) into every
 * filtergraph input fed by this stream, keeping our own reference so the
 * same frame can be re-sent by later heartbeats. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: buffersrc takes its own ref, we retain ours for reuse;
 * PUSH: request immediate processing by the graph. */
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded subtitle onto the sub2video canvas and push it to the
 * filtergraphs. With sub == NULL (heartbeat/flush path) a blank canvas is
 * pushed instead, using the previously recorded end pts.
 * NOTE(review): several lines (sub==NULL branch, error returns) are elided
 * in this excerpt. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* Convert display start/end times (ms offsets from sub->pts, which is in
 * AV_TIME_BASE units) into the input stream's time base. */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
/* (elided branch) without a subtitle, reuse the last end pts as the new pts. */
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
/* Blit every rectangle of the subtitle onto the fresh canvas. */
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
/* Remember when this rendering stops being valid, for heartbeats. */
263 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to starving filtergraph inputs so
 * that video frames do not pile up while a filter (e.g. overlay) waits for
 * a subtitle frame. Called when any packet is read from the same file. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Current rendering expired (or never made): refresh to a blank canvas. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* Only push if at least one downstream buffersrc actually failed a
 * frame request, i.e. the graph is waiting on this input. */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: push one final blank
 * frame if a rendering is still active, then send NULL (EOF) to each
 * connected buffersrc to flush the filtergraphs. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
/* NULL frame == EOF marker for the buffer source. */
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal restore: undo the raw-mode tty changes made
 * at startup. Safe to call from a signal handler (tcsetattr is
 * async-signal-safe; the av_log call below logs at QUIET, i.e. nothing).
 * NOTE(review): the guard on restore_tty is elided in this excerpt. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination bookkeeping shared between handlers and main loop.
 * NOTE(review): written from signal handlers; volatile (not atomic) ints
 * are the historical convention here. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
/* Set once transcode initialization finishes; read by decode_interrupt_cb. */
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler for SIGINT/SIGTERM/etc.: record the signal and count
 * deliveries; after more than 3 signals, hard-exit (write() is
 * async-signal-safe, unlike printf). NOTE(review): return type and the
 * exit call are on lines elided from this excerpt. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * path as POSIX signals via sigterm_handler(). */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* Poll stdin for a single key press without blocking. POSIX path uses
 * select() with a zero timeout; Windows path uses PeekNamedPipe when stdin
 * is a pipe (e.g. running under a GUI). NOTE(review): most of the body,
 * including the actual read and return paths, is elided in this excerpt. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so its failure identifies a pipe stdin. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal arrives.
 * Before init completes (transcode_init_done == 0) a single signal
 * interrupts; after init, it takes more than one. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
/* Shared interrupt callback installed on all AVFormat/AVIO contexts. */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered to run at program exit: drains and frees
 * every filtergraph, output file/stream, input file/stream, and the vstats
 * file, then reports how the run ended. `ret` is the exit status being
 * propagated. NOTE(review): numerous lines (benchmark guard, loop braces,
 * intermediate frees) are elided in this excerpt. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, queued frames, queued subtitles --- */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
/* Drain and free any frames still queued for this graph input. */
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_free(fg->inputs[j]->frame_queue);
/* Same for subtitles buffered by the sub2video hack. */
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* --- output files: close muxer I/O and free contexts --- */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
/* Only close the AVIOContext for formats that own a file (not NOFILE). */
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* --- output streams: free per-stream encoder/bsf/queue state --- */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
539 av_freep(&ost->bsf_extradata_updated);
541 av_frame_free(&ost->filtered_frame);
542 av_frame_free(&ost->last_frame);
543 av_dict_free(&ost->encoder_opts);
545 av_parser_close(ost->parser);
546 avcodec_free_context(&ost->parser_avctx);
548 av_freep(&ost->forced_keyframes);
549 av_expr_free(ost->forced_keyframes_pexpr);
550 av_freep(&ost->avfilter);
551 av_freep(&ost->logfile_prefix);
553 av_freep(&ost->audio_channels_map);
554 ost->audio_channels_mapped = 0;
556 av_dict_free(&ost->sws_dict);
558 avcodec_free_context(&ost->enc_ctx);
559 avcodec_parameters_free(&ost->ref_par);
/* Packets still waiting for a late-initialized muxer must be unreffed. */
561 if (ost->muxing_queue) {
562 while (av_fifo_size(ost->muxing_queue)) {
564 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
565 av_packet_unref(&pkt);
567 av_fifo_freep(&ost->muxing_queue);
570 av_freep(&output_streams[i]);
573 free_input_threads();
/* --- input files and streams --- */
575 for (i = 0; i < nb_input_files; i++) {
576 avformat_close_input(&input_files[i]->ctx);
577 av_freep(&input_files[i]);
579 for (i = 0; i < nb_input_streams; i++) {
580 InputStream *ist = input_streams[i];
582 av_frame_free(&ist->decoded_frame);
583 av_frame_free(&ist->filter_frame);
584 av_dict_free(&ist->decoder_opts);
585 avsubtitle_free(&ist->prev_sub.subtitle);
586 av_frame_free(&ist->sub2video.frame);
587 av_freep(&ist->filters);
588 av_freep(&ist->hwaccel_device);
589 av_freep(&ist->dts_buffer);
591 avcodec_free_context(&ist->dec_ctx);
593 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost vstats data. */
597 if (fclose(vstats_file))
598 av_log(NULL, AV_LOG_ERROR,
599 "Error closing vstats file, loss of information possible: %s\n",
600 av_err2str(AVERROR(errno)));
602 av_freep(&vstats_filename);
604 av_freep(&input_streams);
605 av_freep(&input_files);
606 av_freep(&output_streams);
607 av_freep(&output_files);
611 avformat_network_deinit();
/* Final status line: distinguish signal-triggered exit from a failure
 * that happened after initialization completed. */
613 if (received_sigterm) {
614 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
615 (int) received_sigterm);
616 } else if (ret && atomic_load(&transcode_init_done)) {
617 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (exact,
 * case-sensitive match). Used to strip options already consumed by one
 * component before passing the remainder on. */
623 void remove_avoptions(AVDictionary **a, AVDictionary *b)
625 AVDictionaryEntry *t = NULL;
/* Empty key + IGNORE_SUFFIX iterates over every entry of b. */
627 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed; an entry
 * remaining here means the user passed an option nothing recognized.
 * NOTE(review): the exit call is on a line elided from this excerpt. */
632 void assert_avoptions(AVDictionary *m)
634 AVDictionaryEntry *t;
/* Any remaining entry at all triggers the fatal path. */
635 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Terminate with an explanatory message when an experimental codec is used
 * without -strict experimental. NOTE(review): the entire body is elided in
 * this excerpt. */
641 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the user CPU time elapsed since the previous
 * call, labeled by the printf-style fmt. Calling with fmt == NULL only
 * resets the reference time (see call sites around encode calls).
 * NOTE(review): the fmt==NULL branch and va_start/va_end lines are elided
 * in this excerpt. */
646 static void update_benchmark(const char *fmt, ...)
648 if (do_benchmark_all) {
649 int64_t t = getutime();
655 vsnprintf(buf, sizeof(buf), fmt, va);
657 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags are OR-ed into
 * the stream that triggered the close, `others` into all remaining
 * streams. Used after a fatal muxing error. */
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
666 for (i = 0; i < nb_output_streams; i++) {
667 OutputStream *ost2 = output_streams[i];
668 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final stage before the muxer: buffer the packet if the muxer header is
 * not written yet, enforce -frames limits, extract encoder quality side
 * data, rescale timestamps to the stream time base, repair non-monotonic
 * DTS, and hand the packet to av_interleaved_write_frame().
 * NOTE(review): many interior lines (early exits, some closing braces,
 * debug-flag guards) are elided in this excerpt. */
672 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
674 AVFormatContext *s = of->ctx;
675 AVStream *st = ost->st;
678 if (!of->header_written) {
679 AVPacket tmp_pkt = {0};
680 /* the muxer is not initialized yet, buffer the packet */
/* Grow the muxing FIFO (doubling, capped by -max_muxing_queue_size);
 * failure to grow is a hard error for this stream. */
681 if (!av_fifo_space(ost->muxing_queue)) {
682 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
683 ost->max_muxing_queue_size);
684 if (new_size <= av_fifo_size(ost->muxing_queue)) {
685 av_log(NULL, AV_LOG_ERROR,
686 "Too many packets buffered for output stream %d:%d.\n",
687 ost->file_index, ost->st->index);
690 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Queue a reference to the packet; ownership of pkt's data stays with
 * the FIFO entry until the muxer is initialized. */
694 ret = av_packet_ref(&tmp_pkt, pkt);
697 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
698 av_packet_unref(pkt);
/* VSYNC_DROP (video) / negative async (audio) discard timestamps. */
702 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
703 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
704 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
707 * Audio encoders may split the packets -- #frames in != #packets out.
708 * But there is no reordering, so we can limit the number of output packets
709 * by simply dropping them here.
710 * Counting encoded video frames needs to be done separately because of
711 * reordering, see do_video_out()
713 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
714 if (ost->frame_number >= ost->max_frames) {
715 av_packet_unref(pkt);
/* Quality/error stats attached by the encoder as packet side data. */
720 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
722 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
724 ost->quality = sd ? AV_RL32(sd) : -1;
725 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
727 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
729 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the duration implied by the frame rate. */
734 if (ost->frame_rate.num && ost->is_cfr) {
735 if (pkt->duration > 0)
736 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
737 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* From the internal mux timebase to the stream's actual time base. */
742 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
744 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; replace both with a guess. */
745 if (pkt->dts != AV_NOPTS_VALUE &&
746 pkt->pts != AV_NOPTS_VALUE &&
747 pkt->dts > pkt->pts) {
748 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
750 ost->file_index, ost->st->index);
/* sum - min - max == the middle value of {pts, dts, last_mux_dts+1}. */
752 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
753 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
754 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream;
 * VP9 stream copy is exempted (superframe alt-ref packets share DTS). */
756 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
757 pkt->dts != AV_NOPTS_VALUE &&
758 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
759 ost->last_mux_dts != AV_NOPTS_VALUE) {
760 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
761 if (pkt->dts < max) {
762 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
763 av_log(s, loglevel, "Non-monotonous DTS in output stream "
764 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
765 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* (elided) with exit_on_error this is fatal instead of repaired. */
767 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
770 av_log(s, loglevel, "changing to %"PRId64". This may result "
771 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
773 if (pkt->pts >= pkt->dts)
774 pkt->pts = FFMAX(pkt->pts, max);
779 ost->last_mux_dts = pkt->dts;
781 ost->data_size += pkt->size;
782 ost->packets_written++;
784 pkt->stream_index = ost->index;
/* (elided guard: -debug_ts) trace the packet handed to the muxer. */
787 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
788 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
789 av_get_media_type_string(ost->enc_ctx->codec_type),
790 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
791 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
796 ret = av_interleaved_write_frame(s, pkt);
/* A muxing error finishes every output stream and fails the run. */
798 print_error("av_interleaved_write_frame()", ret);
799 main_return_code = 1;
800 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
802 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and, when recording time
 * is tracked, shrink the file's recording_time to the point this stream
 * reached so other streams stop at the same moment. */
805 static void close_output_stream(OutputStream *ost)
807 OutputFile *of = output_files[ost->file_index];
809 ost->finished |= ENCODER_FINISHED;
/* (elided condition) only when of->shortest is in effect. */
811 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
812 of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through the output stream's bitstream-filter chain (if
 * any), then write it; with no filters the packet goes straight to
 * write_packet(). NOTE(review): recursion/loop structure around `idx` and
 * several error-path lines are elided in this excerpt. */
816 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
820 /* apply the output bitstream filters, if any */
821 if (ost->nb_bitstream_filters) {
824 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
830 /* get a packet from the previous filter up the chain */
831 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: this filter needs more input before producing output. */
832 if (ret == AVERROR(EAGAIN)) {
838 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
839 * the api states this shouldn't happen after init(). Propagate it here to the
840 * muxer and to the next filters in the chain to workaround this.
841 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
842 * par_out->extradata and adapt muxers accordingly to get rid of this. */
/* Bit 0: extradata already propagated to the muxer for this filter. */
843 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
844 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
847 ost->bsf_extradata_updated[idx - 1] |= 1;
850 /* send it to the next filter down the chain or to the muxer */
851 if (idx < ost->nb_bitstream_filters) {
852 /* HACK/FIXME! - See above */
/* Bit 1: extradata already propagated to the next filter in chain. */
853 if (!(ost->bsf_extradata_updated[idx] & 2)) {
854 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
857 ost->bsf_extradata_updated[idx] |= 2;
859 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
864 write_packet(of, pkt, ost);
/* No bitstream filters configured: pass straight through. */
867 write_packet(of, pkt, ost);
870 if (ret < 0 && ret != AVERROR_EOF) {
871 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
872 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still emit frames: when the output
 * file's -t limit is set and the stream's current position (sync_opts
 * relative to first_pts, in encoder time base) has reached it, close the
 * stream and (on an elided line) return 0; otherwise return 1. */
878 static int check_recording_time(OutputStream *ost)
880 OutputFile *of = output_files[ost->file_index];
882 if (of->recording_time != INT64_MAX &&
883 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
884 AV_TIME_BASE_Q) >= 0) {
885 close_output_stream(ost);
/* Encode one audio frame and emit all packets the encoder produces.
 * Maintains sync_opts in samples, honors -t via check_recording_time(),
 * and rescales packet timestamps to the mux timebase.
 * NOTE(review): parameter list tail, error gotos and the receive loop's
 * braces are elided in this excerpt. */
891 static void do_audio_out(OutputFile *of, OutputStream *ost,
894 AVCodecContext *enc = ost->enc_ctx;
898 av_init_packet(&pkt);
902 if (!check_recording_time(ost))
/* Without a usable pts (or with -async < 0), stamp from our counter. */
905 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
906 frame->pts = ost->sync_opts;
/* sync_opts counts samples: next frame starts after this one's samples. */
907 ost->sync_opts = frame->pts + frame->nb_samples;
908 ost->samples_encoded += frame->nb_samples;
909 ost->frames_encoded++;
911 av_assert0(pkt.size || !pkt.data);
912 update_benchmark(NULL);
/* (elided guard: -debug_ts) trace the frame sent to the encoder. */
914 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
915 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
916 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
917 enc->time_base.num, enc->time_base.den);
/* send/receive API: one frame in, zero or more packets out. */
920 ret = avcodec_send_frame(enc, frame);
925 ret = avcodec_receive_packet(enc, &pkt);
926 if (ret == AVERROR(EAGAIN))
931 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
933 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
936 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
937 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
938 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
939 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
942 output_packet(of, &pkt, ost);
947 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet draws, one clears), hence the `nb` loop. Timestamps
 * are shifted by the output start time so -ss/-t work.
 * NOTE(review): parameter list tail and several guard/brace lines are
 * elided in this excerpt. */
951 static void do_subtitle_out(OutputFile *of,
955 int subtitle_out_max_size = 1024 * 1024;
956 int subtitle_out_size, nb, i;
961 if (sub->pts == AV_NOPTS_VALUE) {
962 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
971 subtitle_out = av_malloc(subtitle_out_max_size);
973 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
978 /* Note: DVB subtitle need one packet to draw them and one other
979 packet to clear them */
980 /* XXX: signal it in the codec context ? */
981 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
986 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
988 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
989 pts -= output_files[ost->file_index]->start_time;
990 for (i = 0; i < nb; i++) {
/* num_rects is zeroed for the DVB "clear" pass (elided), so preserve
 * and restore the caller's value across the encode call. */
991 unsigned save_num_rects = sub->num_rects;
993 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
994 if (!check_recording_time(ost))
998 // start_display_time is required to be 0
999 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1000 sub->end_display_time -= sub->start_display_time;
1001 sub->start_display_time = 0;
1005 ost->frames_encoded++;
1007 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1008 subtitle_out_max_size, sub);
1010 sub->num_rects = save_num_rects;
1011 if (subtitle_out_size < 0) {
1012 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1016 av_init_packet(&pkt);
1017 pkt.data = subtitle_out;
1018 pkt.size = subtitle_out_size;
1019 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1020 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1021 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1022 /* XXX: the pts correction is handled here. Maybe handling
1023 it in the codec would be better */
/* First DVB packet (draw) keeps the start time; second (clear) is
 * stamped at the end of the display interval. */
1025 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030 output_packet(of, &pkt, ost);
/* Core video output path: given the next filtered picture (or NULL on
 * flush), decide how many frames to emit (dup/drop per the active vsync
 * mode), stamp them, handle forced keyframes, encode, and mux.
 * NOTE(review): this excerpt elides many lines — switch labels, brace
 * closings, the -debug_ts guards, and some error paths — so comments on
 * elided control flow are inferred from the visible structure. */
1034 static void do_video_out(OutputFile *of,
1036 AVFrame *next_picture,
1039 int ret, format_video_sync;
1041 AVCodecContext *enc = ost->enc_ctx;
1042 AVCodecParameters *mux_par = ost->st->codecpar;
1043 AVRational frame_rate;
1044 int nb_frames, nb0_frames, i;
1045 double delta, delta0;
1046 double duration = 0;
1048 InputStream *ist = NULL;
1049 AVFilterContext *filter = ost->filter->filter;
1051 if (ost->source_index >= 0)
1052 ist = input_streams[ost->source_index];
/* Nominal frame duration in encoder time-base units, from the sink's
 * frame rate, optionally clamped by the user-forced output rate. */
1054 frame_rate = av_buffersink_get_frame_rate(filter);
1055 if (frame_rate.num > 0 && frame_rate.den > 0)
1056 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1058 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1059 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* With a plain passthrough (no filter script), trust the input packet
 * duration when it is usable. */
1061 if (!ost->filters_script &&
1065 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1066 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush: estimate how many times to repeat the last frame from the
 * median of the last three dup counts. */
1069 if (!next_picture) {
1071 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1072 ost->last_nb0_frames[1],
1073 ost->last_nb0_frames[2]);
1075 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1076 delta = delta0 + duration;
1078 /* by default, we output a single frame */
1079 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO to a concrete mode based on the muxer. */
1082 format_video_sync = video_sync_method;
1083 if (format_video_sync == VSYNC_AUTO) {
1084 if(!strcmp(of->ctx->oformat->name, "avi")) {
1085 format_video_sync = VSYNC_VFR;
1087 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1089 && format_video_sync == VSYNC_CFR
1090 && input_files[ist->file_index]->ctx->nb_streams == 1
1091 && input_files[ist->file_index]->input_ts_offset == 0) {
1092 format_video_sync = VSYNC_VSCFR;
1094 if (format_video_sync == VSYNC_CFR && copy_ts) {
1095 format_video_sync = VSYNC_VSCFR;
1098 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Frames arriving "in the past": warn when far behind, otherwise snap
 * the input pts forward to the current output position. */
1102 format_video_sync != VSYNC_PASSTHROUGH &&
1103 format_video_sync != VSYNC_DROP) {
1104 if (delta0 < -0.6) {
1105 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1107 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1108 sync_ipts = ost->sync_opts;
/* Per-mode dup/drop decision (case labels partly elided). */
1113 switch (format_video_sync) {
1115 if (ost->frame_number == 0 && delta0 >= 0.5) {
1116 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1119 ost->sync_opts = lrint(sync_ipts);
1122 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1123 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1125 } else if (delta < -1.1)
1127 else if (delta > 1.1) {
/* More than ~one frame ahead: duplicate to fill the gap (CFR). */
1128 nb_frames = lrintf(delta);
1130 nb0_frames = lrintf(delta0 - 0.6);
1136 else if (delta > 0.6)
1137 ost->sync_opts = lrint(sync_ipts);
1140 case VSYNC_PASSTHROUGH:
1141 ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and record the dup history used on flush. */
1148 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1149 nb0_frames = FFMIN(nb0_frames, nb_frames);
1151 memmove(ost->last_nb0_frames + 1,
1152 ost->last_nb0_frames,
1153 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1154 ost->last_nb0_frames[0] = nb0_frames;
1156 if (nb0_frames == 0 && ost->last_dropped) {
1158 av_log(NULL, AV_LOG_VERBOSE,
1159 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1160 ost->frame_number, ost->st->index, ost->last_frame->pts);
1162 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
/* Excessive duplication usually means broken timestamps: bail out. */
1163 if (nb_frames > dts_error_threshold * 30) {
1164 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1168 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1169 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1170 if (nb_frames_dup > dup_warning) {
1171 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1175 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1177 /* duplicates frame if needed */
1178 for (i = 0; i < nb_frames; i++) {
1179 AVFrame *in_picture;
1180 av_init_packet(&pkt);
/* Early iterations re-encode the previous frame (duplication). */
1184 if (i < nb0_frames && ost->last_frame) {
1185 in_picture = ost->last_frame;
1187 in_picture = next_picture;
1192 in_picture->pts = ost->sync_opts;
1195 if (!check_recording_time(ost))
1197 if (ost->frame_number >= ost->max_frames)
/* Legacy raw-picture path: wrap the AVFrame pointer itself in the
 * packet instead of encoding (compiled only under the FF_API guard). */
1201 #if FF_API_LAVF_FMT_RAWPICTURE
1202 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1203 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1204 /* raw pictures are written as AVPicture structure to
1205 avoid any copies. We support temporarily the older
1207 if (in_picture->interlaced_frame)
1208 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1210 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1211 pkt.data = (uint8_t *)in_picture;
1212 pkt.size = sizeof(AVPicture);
1213 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1214 pkt.flags |= AV_PKT_FLAG_KEY;
1216 output_packet(of, &pkt, ost);
1220 int forced_keyframe = 0;
/* -top override for interlaced encodes. */
1223 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1224 ost->top_field_first >= 0)
1225 in_picture->top_field_first = !!ost->top_field_first;
1227 if (in_picture->interlaced_frame) {
/* MJPEG signals field order differently (TT/BB vs TB/BT). */
1228 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1229 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 in_picture->quality = enc->global_quality;
/* Let the encoder choose the picture type unless forced below. */
1236 in_picture->pict_type = 0;
1238 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* Forced keyframes: explicit pts list, expression, or "source". */
1240 if (ost->forced_kf_index < ost->forced_kf_count &&
1241 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242 ost->forced_kf_index++;
1243 forced_keyframe = 1;
1244 } else if (ost->forced_keyframes_pexpr) {
1246 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1247 res = av_expr_eval(ost->forced_keyframes_pexpr,
1248 ost->forced_keyframes_expr_const_values, NULL);
1249 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1250 ost->forced_keyframes_expr_const_values[FKF_N],
1251 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1252 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1253 ost->forced_keyframes_expr_const_values[FKF_T],
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1257 forced_keyframe = 1;
1258 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1259 ost->forced_keyframes_expr_const_values[FKF_N];
1260 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1261 ost->forced_keyframes_expr_const_values[FKF_T];
1262 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1265 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1266 } else if ( ost->forced_keyframes
1267 && !strncmp(ost->forced_keyframes, "source", 6)
1268 && in_picture->key_frame==1) {
1269 forced_keyframe = 1;
1272 if (forced_keyframe) {
1273 in_picture->pict_type = AV_PICTURE_TYPE_I;
1274 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1277 update_benchmark(NULL);
/* (elided guard: -debug_ts) trace frame in / packets out below. */
1279 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282 enc->time_base.num, enc->time_base.den);
1285 ost->frames_encoded++;
1287 ret = avcodec_send_frame(enc, in_picture);
/* Drain every packet the encoder has ready for this frame. */
1292 ret = avcodec_receive_packet(enc, &pkt);
1293 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1294 if (ret == AVERROR(EAGAIN))
1300 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1301 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1302 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1303 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may omit pts; fall back to our counter. */
1306 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1307 pkt.pts = ost->sync_opts;
1309 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1312 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1313 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1314 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1315 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1318 frame_size = pkt.size;
1319 output_packet(of, &pkt, ost);
1321 /* if two pass, output log */
1322 if (ost->logfile && enc->stats_out) {
1323 fprintf(ost->logfile, "%s", enc->stats_out);
1329 * For video, number of frames in == number of packets out.
1330 * But there may be reordering, so we can't throw away frames on encoder
1331 * flush, we need to limit them here, before they go into encoder.
1333 ost->frame_number++;
1335 if (vstats_filename && frame_size)
1336 do_video_stats(ost, frame_size);
/* Keep a reference to this picture for possible duplication next call. */
1339 if (!ost->last_frame)
1340 ost->last_frame = av_frame_alloc();
1341 av_frame_unref(ost->last_frame);
1342 if (next_picture && ost->last_frame)
1343 av_frame_ref(ost->last_frame, next_picture);
1345 av_frame_free(&ost->last_frame);
1349 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1353 static double psnr(double d)
1355 return -10.0 * log10(d);
/* do_video_stats(): append one line of per-frame video statistics
 * (frame number, quantizer, optional PSNR, frame size, bitrates, picture
 * type) to the file named by -vstats_file.
 * NOTE(review): this chunk is missing several original lines (the enc /
 * frame_number declarations, fopen() failure handling, and else branches),
 * so the code is kept byte-identical and only annotated. */
1358 static void do_video_stats(OutputStream *ost, int frame_size)
1360 AVCodecContext *enc;
1362 double ti1, bitrate, avg_bitrate;
1364 /* this is executed just the first time do_video_stats is called */
1366 vstats_file = fopen(vstats_filename, "w");
1374 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1375 frame_number = ost->st->nb_frames;
/* vstats_version 1 keeps the historic single-stream layout; newer
 * versions also print the output file and stream indices */
1376 if (vstats_version <= 1) {
1377 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1378 ost->quality / (float)FF_QP2LAMBDA);
1380 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1381 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder collected error stats */
1384 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1385 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387 fprintf(vstats_file,"f_size= %6d ", frame_size);
1388 /* compute pts value */
1389 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate of this frame vs. running average over ti1 */
1393 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1394 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1395 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1396 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1397 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1401 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1403 static void finish_output_stream(OutputStream *ost)
1405 OutputFile *of = output_files[ost->file_index];
1408 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1411 for (i = 0; i < of->ctx->nb_streams; i++)
1412 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @param flush when set, also drain the buffersinks at EOF and emit a
 *              final NULL frame to the video path
 * @return 0 for success, <0 for severe errors
 *
 * NOTE(review): this chunk is missing many original lines (continue
 * statements, closing braces, breaks, error-path exits), so the code is
 * kept byte-identical and only annotated.
 */
1422 static int reap_filters(int flush)
1424 AVFrame *filtered_frame = NULL;
1427 /* Reap all buffers present in the buffer sinks */
1428 for (i = 0; i < nb_output_streams; i++) {
1429 OutputStream *ost = output_streams[i];
1430 OutputFile *of = output_files[ost->file_index];
1431 AVFilterContext *filter;
1432 AVCodecContext *enc = ost->enc_ctx;
1435 if (!ost->filter || !ost->filter->graph->graph)
1437 filter = ost->filter->filter;
/* lazily initialize the output stream the first time its sink has data */
1439 if (!ost->initialized) {
1440 char error[1024] = "";
1441 ret = init_output_stream(ost, error, sizeof(error));
1443 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1444 ost->file_index, ost->index, error);
1449 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1450 return AVERROR(ENOMEM);
1452 filtered_frame = ost->filtered_frame;
1455 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not trigger upstream activity */
1456 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1457 AV_BUFFERSINK_FLAG_NO_REQUEST);
1459 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1460 av_log(NULL, AV_LOG_WARNING,
1461 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1462 } else if (flush && ret == AVERROR_EOF) {
/* at EOF, push a NULL frame so the video path can emit trailing frames */
1463 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1464 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1468 if (ost->finished) {
1469 av_frame_unref(filtered_frame);
/* rescale the filter-timebase pts into the encoder timebase; extra_bits
 * keeps additional precision for float_pts, which the fps code consumes */
1472 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1473 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1474 AVRational filter_tb = av_buffersink_get_time_base(filter);
1475 AVRational tb = enc->time_base;
1476 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478 tb.den <<= extra_bits;
1480 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1481 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1482 float_pts /= 1 << extra_bits;
1483 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1484 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486 filtered_frame->pts =
1487 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1488 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1490 //if (ost->source_index >= 0)
1491 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1493 switch (av_buffersink_get_type(filter)) {
1494 case AVMEDIA_TYPE_VIDEO:
1495 if (!ost->frame_aspect_ratio.num)
1496 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1499 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1502 enc->time_base.num, enc->time_base.den);
1505 do_video_out(of, ost, filtered_frame, float_pts);
1507 case AVMEDIA_TYPE_AUDIO:
/* encoders that cannot change parameters mid-stream must receive a
 * normalized (constant channel count) filter output */
1508 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509 enc->channels != av_frame_get_channels(filtered_frame)) {
1510 av_log(NULL, AV_LOG_ERROR,
1511 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1514 do_audio_out(of, ost, filtered_frame);
1517 // TODO support subtitle filters
1521 av_frame_unref(filtered_frame);
/* print_final_stats(): emitted once at the end of a run.  Prints the
 * per-type byte totals and muxing overhead at INFO level, then verbose
 * per-stream demux/decode and encode/mux statistics, and finally a
 * warning if nothing at all was encoded.
 * NOTE(review): this chunk is missing several original lines (loop
 * variable declarations, closing braces, else keywords), so the code is
 * kept byte-identical and only annotated. */
1528 static void print_final_stats(int64_t total_size)
1530 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531 uint64_t subtitle_size = 0;
1532 uint64_t data_size = 0;
/* percent stays -1.0 => overhead unknown, printed as "unknown" below */
1533 float percent = -1.0;
1537 for (i = 0; i < nb_output_streams; i++) {
1538 OutputStream *ost = output_streams[i];
1539 switch (ost->enc_ctx->codec_type) {
1540 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543 default: other_size += ost->data_size; break;
1545 extra_size += ost->enc_ctx->extradata_size;
1546 data_size += ost->data_size;
1547 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1548 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw coded payload */
1552 if (data_size && total_size>0 && total_size >= data_size)
1553 percent = 100.0 * (total_size - data_size) / data_size;
1555 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556 video_size / 1024.0,
1557 audio_size / 1024.0,
1558 subtitle_size / 1024.0,
1559 other_size / 1024.0,
1560 extra_size / 1024.0);
1562 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1564 av_log(NULL, AV_LOG_INFO, "unknown");
1565 av_log(NULL, AV_LOG_INFO, "\n");
1567 /* print verbose per-stream stats */
1568 for (i = 0; i < nb_input_files; i++) {
1569 InputFile *f = input_files[i];
/* note: this local total_size intentionally shadows the parameter */
1570 uint64_t total_packets = 0, total_size = 0;
1572 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573 i, f->ctx->filename);
1575 for (j = 0; j < f->nb_streams; j++) {
1576 InputStream *ist = input_streams[f->ist_index + j];
1577 enum AVMediaType type = ist->dec_ctx->codec_type;
1579 total_size += ist->data_size;
1580 total_packets += ist->nb_packets;
1582 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583 i, j, media_type_string(type));
1584 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585 ist->nb_packets, ist->data_size);
1587 if (ist->decoding_needed) {
1588 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589 ist->frames_decoded);
1590 if (type == AVMEDIA_TYPE_AUDIO)
1591 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592 av_log(NULL, AV_LOG_VERBOSE, "; ");
1595 av_log(NULL, AV_LOG_VERBOSE, "\n");
1598 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599 total_packets, total_size);
/* mirror of the input loop above, for the muxed output side */
1602 for (i = 0; i < nb_output_files; i++) {
1603 OutputFile *of = output_files[i];
1604 uint64_t total_packets = 0, total_size = 0;
1606 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607 i, of->ctx->filename);
1609 for (j = 0; j < of->ctx->nb_streams; j++) {
1610 OutputStream *ost = output_streams[of->ost_index + j];
1611 enum AVMediaType type = ost->enc_ctx->codec_type;
1613 total_size += ost->data_size;
1614 total_packets += ost->packets_written;
1616 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617 i, j, media_type_string(type));
1618 if (ost->encoding_needed) {
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620 ost->frames_encoded);
1621 if (type == AVMEDIA_TYPE_AUDIO)
1622 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623 av_log(NULL, AV_LOG_VERBOSE, "; ");
1626 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627 ost->packets_written, ost->data_size);
1629 av_log(NULL, AV_LOG_VERBOSE, "\n");
1632 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633 total_packets, total_size);
1635 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1638 av_log(NULL, AV_LOG_WARNING, "\n");
1640 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* print_report(): build and emit the one-line live progress report
 * ("frame= ... fps= ... q= ... size= ... time= ... bitrate= ... speed=")
 * to stderr/log, mirror the same data in machine-readable key=value form
 * into buf_script for -progress consumers, and on the last report print
 * the final statistics.  Rate-limited to one update per 500ms.
 * NOTE(review): this chunk is missing many original lines (buf/t/fps/q
 * declarations, returns, else branches, closing braces); the statement
 * order is load-bearing, so the code is kept byte-identical and only
 * annotated. */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1648 AVBPrint buf_script;
1650 AVFormatContext *oc;
1652 AVCodecContext *enc;
1653 int frame_number, vid, i;
1656 int64_t pts = INT64_MIN + 1;
/* static state: persists across calls to implement the 500ms throttle */
1657 static int64_t last_time = -1;
1658 static int qp_histogram[52];
1659 int hours, mins, secs, us;
1663 if (!print_stats && !is_last_report && !progress_avio)
1666 if (!is_last_report) {
1667 if (last_time == -1) {
1668 last_time = cur_time;
/* throttle: at most one progress line every 500000 microseconds */
1671 if ((cur_time - last_time) < 500000)
1673 last_time = cur_time;
1676 t = (cur_time-timer_start) / 1000000.0;
1679 oc = output_files[0]->ctx;
1681 total_size = avio_size(oc->pb);
1682 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683 total_size = avio_tell(oc->pb);
1687 av_bprint_init(&buf_script, 0, 1);
1688 for (i = 0; i < nb_output_streams; i++) {
1690 ost = output_streams[i];
1692 if (!ost->stream_copy)
1693 q = ost->quality / (float) FF_QP2LAMBDA;
1695 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1697 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698 ost->file_index, ost->index, q);
/* only the first video stream drives frame/fps reporting (vid flag) */
1700 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1703 frame_number = ost->frame_number;
1704 fps = t > 1 ? frame_number / t : 0;
1705 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1706 frame_number, fps < 9.95, fps, q);
1707 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1709 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710 ost->file_index, ost->index, q);
1712 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist support: bucket quantizers and print a log2 histogram */
1716 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1718 for (j = 0; j < 32; j++)
1719 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1722 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1724 double error, error_sum = 0;
1725 double scale, scale_sum = 0;
1727 char type[3] = { 'Y','U','V' };
1728 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1729 for (j = 0; j < 3; j++) {
/* last report: cumulative encoder error; otherwise per-frame error */
1730 if (is_last_report) {
1731 error = enc->error[j];
1732 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1734 error = ost->error[j];
1735 scale = enc->width * enc->height * 255.0 * 255.0;
1741 p = psnr(error / scale);
1742 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1743 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744 ost->file_index, ost->index, type[j] | 32, p);
1746 p = psnr(error_sum / scale_sum);
1747 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1748 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749 ost->file_index, ost->index, p);
1753 /* compute min output value */
1754 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1755 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1756 ost->st->time_base, AV_TIME_BASE_Q));
1758 nb_frames_drop += ost->last_dropped;
/* split the microsecond pts into h:m:s.us components for display */
1761 secs = FFABS(pts) / AV_TIME_BASE;
1762 us = FFABS(pts) % AV_TIME_BASE;
1768 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1769 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 "size=%8.0fkB time=", total_size / 1024.0);
1776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1777 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1778 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1779 (100 * us) / AV_TIME_BASE);
1782 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1783 av_bprintf(&buf_script, "bitrate=N/A\n");
1785 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1786 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1789 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1792 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1793 hours, mins, secs, us);
1795 if (nb_frames_dup || nb_frames_drop)
1796 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1797 nb_frames_dup, nb_frames_drop);
1798 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1799 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1802 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1803 av_bprintf(&buf_script, "speed=N/A\n");
1805 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1806 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1809 if (print_stats || is_last_report) {
/* carriage return keeps the live report on a single console line */
1810 const char end = is_last_report ? '\n' : '\r';
1811 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1812 fprintf(stderr, "%s %c", buf, end);
1814 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1819 if (progress_avio) {
1820 av_bprintf(&buf_script, "progress=%s\n",
1821 is_last_report ? "end" : "continue");
1822 avio_write(progress_avio, buf_script.str,
1823 FFMIN(buf_script.len, buf_script.size - 1));
1824 avio_flush(progress_avio);
1825 av_bprint_finalize(&buf_script, NULL);
1826 if (is_last_report) {
1827 if ((ret = avio_closep(&progress_avio)) < 0)
1828 av_log(NULL, AV_LOG_ERROR,
1829 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1834 print_final_stats(total_size);
/* flush_encoders(): called at end of input.  For every output stream that
 * is being encoded, drain the encoder's delayed packets (send a NULL frame,
 * then receive until EOF) and hand them to the muxer.  Streams that were
 * never initialized are given a last-chance fake-format configuration so
 * their filtergraph and encoder can still be set up.
 * NOTE(review): this chunk is missing many original lines (declarations of
 * ret/x/pkt/pkt_size, continue/exit_program paths, braces, the AUDIO/VIDEO
 * desc assignments), so the code is kept byte-identical and only
 * annotated. */
1837 static void flush_encoders(void)
1841 for (i = 0; i < nb_output_streams; i++) {
1842 OutputStream *ost = output_streams[i];
1843 AVCodecContext *enc = ost->enc_ctx;
1844 OutputFile *of = output_files[ost->file_index];
1846 if (!ost->encoding_needed)
1849 // Try to enable encoding with no input frames.
1850 // Maybe we should just let encoding fail instead.
1851 if (!ost->initialized) {
1852 FilterGraph *fg = ost->filter->graph;
1853 char error[1024] = "";
1855 av_log(NULL, AV_LOG_WARNING,
1856 "Finishing stream %d:%d without any data written to it.\n",
1857 ost->file_index, ost->st->index);
1859 if (ost->filter && !fg->graph) {
1861 for (x = 0; x < fg->nb_inputs; x++) {
1862 InputFilter *ifilter = fg->inputs[x];
1863 if (ifilter->format < 0) {
1864 AVCodecParameters *par = ifilter->ist->st->codecpar;
1865 // We never got any input. Set a fake format, which will
1866 // come from libavformat.
1867 ifilter->format = par->format;
1868 ifilter->sample_rate = par->sample_rate;
1869 ifilter->channels = par->channels;
1870 ifilter->channel_layout = par->channel_layout;
1871 ifilter->width = par->width;
1872 ifilter->height = par->height;
1873 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1877 if (!ifilter_has_all_input_formats(fg))
1880 ret = configure_filtergraph(fg);
1882 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1886 finish_output_stream(ost);
1889 ret = init_output_stream(ost, error, sizeof(error));
1891 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1892 ost->file_index, ost->index, error);
/* raw/PCM-style audio encoders (frame_size <= 1) buffer nothing */
1897 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1899 #if FF_API_LAVF_FMT_RAWPICTURE
1900 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1904 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame enters draining mode in the send/receive encode API */
1907 avcodec_send_frame(enc, NULL);
1910 const char *desc = NULL;
1914 switch (enc->codec_type) {
1915 case AVMEDIA_TYPE_AUDIO:
1918 case AVMEDIA_TYPE_VIDEO:
1925 av_init_packet(&pkt);
1929 update_benchmark(NULL);
1930 ret = avcodec_receive_packet(enc, &pkt);
1931 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1932 if (ret < 0 && ret != AVERROR_EOF) {
1933 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats are still emitted while draining */
1938 if (ost->logfile && enc->stats_out) {
1939 fprintf(ost->logfile, "%s", enc->stats_out);
1941 if (ret == AVERROR_EOF) {
1944 if (ost->finished & MUXER_FINISHED) {
1945 av_packet_unref(&pkt);
1948 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1949 pkt_size = pkt.size;
1950 output_packet(of, &pkt, ost);
1951 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1952 do_video_stats(ost, pkt_size);
1959 * Check whether a packet from ist should be written into ost at this time
1961 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1963 OutputFile *of = output_files[ost->file_index];
1964 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1966 if (ost->source_index != ist_index)
1972 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* do_streamcopy(): pass an input packet through to the muxer without
 * re-encoding (-c copy).  Handles initial-keyframe gating, -ss/-t window
 * checks, timestamp rescaling into the muxer timebase, audio DTS
 * smoothing, and the legacy parser/raw-picture compatibility paths.
 * NOTE(review): this chunk is missing several original lines (the opkt/
 * pict declarations, returns, else branches, closing braces, the
 * ost->sync_opts++ line); the timestamp math is order-sensitive, so the
 * code is kept byte-identical and only annotated. */
1978 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1980 OutputFile *of = output_files[ost->file_index];
1981 InputFile *f = input_files [ist->file_index];
/* start_time in AV_TIME_BASE units; also precomputed in the mux timebase */
1982 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1983 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1987 av_init_packet(&opkt);
/* a copied stream must normally begin on a keyframe */
1989 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1990 !ost->copy_initial_nonkeyframes)
1993 if (!ost->frame_number && !ost->copy_prior_start) {
1994 int64_t comp_start = start_time;
1995 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1996 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1997 if (pkt->pts == AV_NOPTS_VALUE ?
1998 ist->pts < comp_start :
1999 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* -t: close the stream once the recording window is exceeded */
2003 if (of->recording_time != INT64_MAX &&
2004 ist->pts >= of->recording_time + start_time) {
2005 close_output_stream(ost);
2009 if (f->recording_time != INT64_MAX) {
2010 start_time = f->ctx->start_time;
2011 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2012 start_time += f->start_time;
2013 if (ist->pts >= f->recording_time + start_time) {
2014 close_output_stream(ost);
2019 /* force the input stream PTS */
2020 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2023 if (pkt->pts != AV_NOPTS_VALUE)
2024 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2026 opkt.pts = AV_NOPTS_VALUE;
/* missing DTS: fall back to the input stream's tracked dts estimate */
2028 if (pkt->dts == AV_NOPTS_VALUE)
2029 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2031 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2032 opkt.dts -= ost_tb_start_time;
/* audio: regenerate smooth timestamps from the sample count to avoid
 * container timestamp jitter accumulating */
2034 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2035 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2037 duration = ist->dec_ctx->frame_size;
2038 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2039 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2040 ost->mux_timebase) - ost_tb_start_time;
2043 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2045 opkt.flags = pkt->flags;
2046 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2047 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2048 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2049 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2050 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2052 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2053 &opkt.data, &opkt.size,
2054 pkt->data, pkt->size,
2055 pkt->flags & AV_PKT_FLAG_KEY);
2057 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* when the parser allocated a new buffer, give opkt ownership of it */
2062 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2067 opkt.data = pkt->data;
2068 opkt.size = pkt->size;
2070 av_copy_packet_side_data(&opkt, pkt);
2072 #if FF_API_LAVF_FMT_RAWPICTURE
2073 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2074 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2075 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2076 /* store AVPicture in AVPacket, as expected by the output format */
2077 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2079 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2083 opkt.data = (uint8_t *)&pict;
2084 opkt.size = sizeof(AVPicture);
2085 opkt.flags |= AV_PKT_FLAG_KEY;
2089 output_packet(of, &opkt, ost);
2092 int guess_input_channel_layout(InputStream *ist)
2094 AVCodecContext *dec = ist->dec_ctx;
2096 if (!dec->channel_layout) {
2097 char layout_name[256];
2099 if (dec->channels > ist->guess_layout_max)
2101 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2102 if (!dec->channel_layout)
2104 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2105 dec->channels, dec->channel_layout);
2106 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2107 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2112 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2114 if (*got_output || ret<0)
2115 decode_error_stat[ret<0] ++;
2117 if (ret < 0 && exit_on_error)
2120 if (exit_on_error && *got_output && ist) {
2121 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2122 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2128 // Filters can be configured only if the formats of all inputs are known.
2129 static int ifilter_has_all_input_formats(FilterGraph *fg)
2132 for (i = 0; i < fg->nb_inputs; i++) {
2133 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2134 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* ifilter_send_frame(): push one decoded frame into a filtergraph input.
 * Detects parameter changes (format, hw frames context, audio rate/layout,
 * video dimensions) that require reconfiguring the graph; while the graph
 * cannot yet be (re)configured, frames are buffered in the input's FIFO.
 * NOTE(review): this chunk is missing several original lines (break/
 * return/goto paths, braces, the errbuf declaration), so the code is kept
 * byte-identical and only annotated. */
2140 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2142 FilterGraph *fg = ifilter->graph;
2143 int need_reinit, ret, i;
2145 /* determine if the parameters for this input changed */
2146 need_reinit = ifilter->format != frame->format;
/* a change of hw frames context also forces a reinit */
2147 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2148 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2151 switch (ifilter->ist->st->codecpar->codec_type) {
2152 case AVMEDIA_TYPE_AUDIO:
2153 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2154 ifilter->channels != frame->channels ||
2155 ifilter->channel_layout != frame->channel_layout;
2157 case AVMEDIA_TYPE_VIDEO:
2158 need_reinit |= ifilter->width != frame->width ||
2159 ifilter->height != frame->height;
2164 ret = ifilter_parameters_from_frame(ifilter, frame);
2169 /* (re)init the graph if possible, otherwise buffer the frame and return */
2170 if (need_reinit || !fg->graph) {
2171 for (i = 0; i < fg->nb_inputs; i++) {
2172 if (!ifilter_has_all_input_formats(fg)) {
/* queue a private copy; the caller's frame is emptied */
2173 AVFrame *tmp = av_frame_clone(frame);
2175 return AVERROR(ENOMEM);
2176 av_frame_unref(frame);
2178 if (!av_fifo_space(ifilter->frame_queue)) {
2179 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2181 av_frame_free(&tmp);
2185 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down for reconfiguration */
2190 ret = reap_filters(1);
2191 if (ret < 0 && ret != AVERROR_EOF) {
2193 av_strerror(ret, errbuf, sizeof(errbuf));
2195 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2199 ret = configure_filtergraph(fg);
2201 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2206 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2208 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
2215 static int ifilter_send_eof(InputFilter *ifilter)
2221 if (ifilter->filter) {
2222 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2226 // the filtergraph was never configured
2227 FilterGraph *fg = ifilter->graph;
2228 for (i = 0; i < fg->nb_inputs; i++)
2229 if (!fg->inputs[i]->eof)
2231 if (i == fg->nb_inputs) {
2232 // All the input streams have finished without the filtergraph
2233 // ever being configured.
2234 // Mark the output streams as finished.
2235 for (j = 0; j < fg->nb_outputs; j++)
2236 finish_output_stream(fg->outputs[j]->ost);
2243 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2244 // There is the following difference: if you got a frame, you must call
2245 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2246 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2247 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2254 ret = avcodec_send_packet(avctx, pkt);
2255 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2256 // decoded frames with avcodec_receive_frame() until done.
2257 if (ret < 0 && ret != AVERROR_EOF)
2261 ret = avcodec_receive_frame(avctx, frame);
2262 if (ret < 0 && ret != AVERROR(EAGAIN))
2270 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2275 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2276 for (i = 0; i < ist->nb_filters; i++) {
2277 if (i < ist->nb_filters - 1) {
2278 f = ist->filter_frame;
2279 ret = av_frame_ref(f, decoded_frame);
2284 ret = ifilter_send_frame(ist->filters[i], f);
2285 if (ret == AVERROR_EOF)
2286 ret = 0; /* ignore */
2288 av_log(NULL, AV_LOG_ERROR,
2289 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* decode_audio(): decode one audio packet (or drain), validate the
 * decoder output, advance the stream's pts/dts estimates by the decoded
 * sample count, pick a timebase for the frame's pts, smooth it with
 * av_rescale_delta(), and forward the frame to the filtergraphs.
 * NOTE(review): this chunk is missing several original lines (the ret/err
 * declarations, goto/return targets, else keywords, closing braces), so
 * the code is kept byte-identical and only annotated. */
2296 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2299 AVFrame *decoded_frame;
2300 AVCodecContext *avctx = ist->dec_ctx;
2302 AVRational decoded_frame_tb;
/* lazily allocate the reusable frames on first use */
2304 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2305 return AVERROR(ENOMEM);
2306 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2307 return AVERROR(ENOMEM);
2308 decoded_frame = ist->decoded_frame;
2310 update_benchmark(NULL);
2311 ret = decode(avctx, decoded_frame, got_output, pkt);
2312 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2316 if (ret >= 0 && avctx->sample_rate <= 0) {
2317 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2318 ret = AVERROR_INVALIDDATA;
2321 if (ret != AVERROR_EOF)
2322 check_decode_result(ist, got_output, ret);
2324 if (!*got_output || ret < 0)
2327 ist->samples_decoded += decoded_frame->nb_samples;
2328 ist->frames_decoded++;
2331 /* increment next_dts to use for the case where the input stream does not
2332 have timestamps or there are multiple frames in the packet */
2333 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2335 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the timebase the frame's pts is expressed in: frame pts and
 * packet pts are in the stream timebase, the ist->dts fallback is in
 * AV_TIME_BASE_Q */
2339 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2340 decoded_frame_tb = ist->st->time_base;
2341 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2342 decoded_frame->pts = pkt->pts;
2343 decoded_frame_tb = ist->st->time_base;
2345 decoded_frame->pts = ist->dts;
2346 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale to the sample-rate timebase, compensating rounding drift */
2348 if (decoded_frame->pts != AV_NOPTS_VALUE)
2349 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2350 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2351 (AVRational){1, avctx->sample_rate});
2352 ist->nb_samples = decoded_frame->nb_samples;
2353 err = send_frame_to_filters(ist, decoded_frame);
2355 av_frame_unref(ist->filter_frame);
2356 av_frame_unref(decoded_frame);
2357 return err < 0 ? err : ret;
/*
 * Decode one video packet for this input stream (pkt == NULL drains the
 * decoder) and forward any decoded frame to the attached filtergraphs.
 * *got_output is set when a frame was produced; returns the decode status,
 * or the filter-feed error if that failed.
 * NOTE(review): this excerpt omits several original lines (error paths,
 * closing braces); comments describe only the visible code.
 */
2360 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2363 AVFrame *decoded_frame;
2364 int i, ret = 0, err = 0;
2365 int64_t best_effort_timestamp;
2366 int64_t dts = AV_NOPTS_VALUE;
2369 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2370 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2372 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the reusable decode/filter frames on first use. */
2375 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2376 return AVERROR(ENOMEM);
2377 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2378 return AVERROR(ENOMEM);
2379 decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE_Q) into this stream's time base. */
2380 if (ist->dts != AV_NOPTS_VALUE)
2381 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2384 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2387 // The old code used to set dts on the drain packet, which does not work
2388 // with the new API anymore.
/* Queue this dts so frames emitted during draining without their own
 * timestamp can recover one (see the eof branch below). */
2390 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2392 return AVERROR(ENOMEM);
2393 ist->dts_buffer = new;
2394 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2397 update_benchmark(NULL);
2398 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2399 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2403 // The following line may be required in some cases where there is no parser
2404 // or the parser does not has_b_frames correctly
2405 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2406 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2407 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2409 av_log(ist->dec_ctx, AV_LOG_WARNING,
2410 "video_delay is larger in decoder than demuxer %d > %d.\n"
2411 "If you want to help, upload a sample "
2412 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2413 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2414 ist->dec_ctx->has_b_frames,
2415 ist->st->codecpar->video_delay);
2418 if (ret != AVERROR_EOF)
2419 check_decode_result(ist, got_output, ret);
/* Debug-log only: a frame whose geometry/format differs from the decoder
 * context indicates a mid-stream parameter change. */
2421 if (*got_output && ret >= 0) {
2422 if (ist->dec_ctx->width != decoded_frame->width ||
2423 ist->dec_ctx->height != decoded_frame->height ||
2424 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2425 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2426 decoded_frame->width,
2427 decoded_frame->height,
2428 decoded_frame->format,
2429 ist->dec_ctx->width,
2430 ist->dec_ctx->height,
2431 ist->dec_ctx->pix_fmt);
2435 if (!*got_output || ret < 0)
/* User override of field order (-top). */
2438 if(ist->top_field_first>=0)
2439 decoded_frame->top_field_first = ist->top_field_first;
2441 ist->frames_decoded++;
/* Download the frame from hardware memory when a hwaccel produced it. */
2443 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2444 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2448 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2450 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* Forced input CFR: synthesize timestamps from a running counter. */
2452 if (ist->framerate.num)
2453 best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, frames may carry no timestamp; fall back to the oldest
 * queued packet dts and shift the queue down by one. */
2455 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2456 best_effort_timestamp = ist->dts_buffer[0];
2458 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2459 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2460 ist->nb_dts_buffer--;
2463 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2464 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2466 if (ts != AV_NOPTS_VALUE)
2467 ist->next_pts = ist->pts = ts;
2471 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2472 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2473 ist->st->index, av_ts2str(decoded_frame->pts),
2474 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2475 best_effort_timestamp,
2476 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2477 decoded_frame->key_frame, decoded_frame->pict_type,
2478 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the frame's own value. */
2481 if (ist->st->sample_aspect_ratio.num)
2482 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2484 err = send_frame_to_filters(ist, decoded_frame);
2487 av_frame_unref(ist->filter_frame);
2488 av_frame_unref(decoded_frame);
/* A filter-feed failure takes precedence over the decode status. */
2489 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet; render it for sub2video filtering and/or
 * send it to subtitle encoders. With -fix_sub_duration, each subtitle is
 * buffered one step so its display time can be clipped when the next
 * subtitle starts earlier.
 * NOTE(review): this excerpt omits some original lines (braces, early
 * returns); comments describe only the visible code.
 */
2492 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2495 AVSubtitle subtitle;
2497 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2498 &subtitle, got_output, pkt);
2500 check_decode_result(NULL, got_output, ret);
2502 if (ret < 0 || !*got_output) {
2505 sub2video_flush(ist);
/* -fix_sub_duration: compare the new subtitle's pts against the buffered
 * previous one and shorten the previous display time if they overlap
 * (end is in milliseconds, hence the rescale by 1000/AV_TIME_BASE). */
2509 if (ist->fix_sub_duration) {
2511 if (ist->prev_sub.got_output) {
2512 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2513 1000, AV_TIME_BASE);
2514 if (end < ist->prev_sub.subtitle.end_display_time) {
2515 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2516 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2517 ist->prev_sub.subtitle.end_display_time, end,
2518 end <= 0 ? ", dropping it" : "");
2519 ist->prev_sub.subtitle.end_display_time = end;
/* Swap the just-decoded state with the buffered previous subtitle, so the
 * rest of the function processes the delayed one. */
2522 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2523 FFSWAP(int, ret, ist->prev_sub.ret);
2524 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video routing: render now if the frame exists, otherwise queue the
 * subtitle (growing FIFO) until the filtergraph is ready. */
2532 if (ist->sub2video.frame) {
2533 sub2video_update(ist, &subtitle);
2534 } else if (ist->nb_filters) {
2535 if (!ist->sub2video.sub_queue)
2536 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2537 if (!ist->sub2video.sub_queue)
2539 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2540 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2544 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2548 if (!subtitle.num_rects)
2551 ist->frames_decoded++;
/* Encode the subtitle on every matching subtitle output stream. */
2553 for (i = 0; i < nb_output_streams; i++) {
2554 OutputStream *ost = output_streams[i];
2556 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2557 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2560 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2565 avsubtitle_free(&subtitle);
2569 static int send_filter_eof(InputStream *ist)
2572 for (i = 0; i < ist->nb_filters; i++) {
2573 ret = ifilter_send_eof(ist->filters[i]);
/*
 * Process one demuxed packet for an input stream: maintain the stream's
 * pts/dts clocks (in AV_TIME_BASE_Q), run the appropriate decoder when
 * decoding is needed, and otherwise advance the clocks analytically and
 * pass the packet straight to stream-copy outputs.
 * Returns 0 once EOF has been fully reached, non-zero otherwise.
 * NOTE(review): this excerpt omits some original lines (loop control,
 * braces, the repeating/got_output bookkeeping); comments describe only
 * the visible code.
 */
2580 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2581 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2585 int eof_reached = 0;
/* First packet: seed the clocks. The negative offset accounts for decoder
 * B-frame delay when an average frame rate is known. */
2588 if (!ist->saw_first_ts) {
2589 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2591 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2592 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2593 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2595 ist->saw_first_ts = 1;
2598 if (ist->next_dts == AV_NOPTS_VALUE)
2599 ist->next_dts = ist->dts;
2600 if (ist->next_pts == AV_NOPTS_VALUE)
2601 ist->next_pts = ist->pts;
2605 av_init_packet(&avpkt);
/* Re-sync the clocks to the packet's dts, rescaled to AV_TIME_BASE_Q. */
2612 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2613 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2614 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2615 ist->next_pts = ist->pts = ist->dts;
2618 // while we have more to decode or while the decoder did output something on EOF
2619 while (ist->decoding_needed) {
2620 int64_t duration = 0;
2622 int decode_failed = 0;
2624 ist->pts = ist->next_pts;
2625 ist->dts = ist->next_dts;
2627 switch (ist->dec_ctx->codec_type) {
2628 case AVMEDIA_TYPE_AUDIO:
2629 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2632 case AVMEDIA_TYPE_VIDEO:
2633 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* Predict the next dts from the packet duration, or failing that from the
 * decoder framerate and the parser's repeat_pict. */
2635 if (!repeating || !pkt || got_output) {
2636 if (pkt && pkt->duration) {
2637 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2638 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2639 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2640 duration = ((int64_t)AV_TIME_BASE *
2641 ist->dec_ctx->framerate.den * ticks) /
2642 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2645 if(ist->dts != AV_NOPTS_VALUE && duration) {
2646 ist->next_dts += duration;
2648 ist->next_dts = AV_NOPTS_VALUE;
2652 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2654 case AVMEDIA_TYPE_SUBTITLE:
2657 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2658 if (!pkt && ret >= 0)
2665 if (ret == AVERROR_EOF) {
/* Distinguish decoder failures from post-decode processing failures. */
2671 if (decode_failed) {
2672 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2673 ist->file_index, ist->st->index, av_err2str(ret));
2675 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2676 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2678 if (!decode_failed || exit_on_error)
2684 ist->got_output = 1;
2689 // During draining, we might get multiple output frames in this loop.
2690 // ffmpeg.c does not drain the filter chain on configuration changes,
2691 // which means if we send multiple frames at once to the filters, and
2692 // one of those frames changes configuration, the buffered frames will
2693 // be lost. This can upset certain FATE tests.
2694 // Decode only 1 frame per call on EOF to appease these FATE tests.
2695 // The ideal solution would be to rewrite decoding to use the new
2696 // decoding API in a better way.
2703 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2704 /* except when looping we need to flush but not to send an EOF */
2705 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2706 int ret = send_filter_eof(ist);
2708 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2713 /* handle stream copy */
/* Not decoding: advance the clocks purely from packet/codec metadata. */
2714 if (!ist->decoding_needed) {
2715 ist->dts = ist->next_dts;
2716 switch (ist->dec_ctx->codec_type) {
2717 case AVMEDIA_TYPE_AUDIO:
2718 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2719 ist->dec_ctx->sample_rate;
2721 case AVMEDIA_TYPE_VIDEO:
2722 if (ist->framerate.num) {
2723 // TODO: Remove work-around for c99-to-c89 issue 7
2724 AVRational time_base_q = AV_TIME_BASE_Q;
2725 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727 } else if (pkt->duration) {
2728 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729 } else if(ist->dec_ctx->framerate.num != 0) {
2730 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731 ist->next_dts += ((int64_t)AV_TIME_BASE *
2732 ist->dec_ctx->framerate.den * ticks) /
2733 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2737 ist->pts = ist->dts;
2738 ist->next_pts = ist->next_dts;
/* Hand the raw packet to every stream-copy output fed by this input. */
2740 for (i = 0; pkt && i < nb_output_streams; i++) {
2741 OutputStream *ost = output_streams[i];
2743 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746 do_streamcopy(ist, ost, pkt);
2749 return !eof_reached;
/*
 * Print an SDP description covering all RTP output muxers to stdout, or
 * write it to -sdp_file when given. Does nothing until every output file
 * has written its header.
 * NOTE(review): excerpt omits some original lines (returns, cleanup).
 */
2752 static void print_sdp(void)
2757 AVIOContext *sdp_pb;
2758 AVFormatContext **avc;
/* Bail out while any output file's header is still pending. */
2760 for (i = 0; i < nb_output_files; i++) {
2761 if (!output_files[i]->header_written)
2765 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts (j counts them). */
2768 for (i = 0, j = 0; i < nb_output_files; i++) {
2769 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770 avc[j] = output_files[i]->ctx;
2778 av_sdp_create(avc, j, sdp, sizeof(sdp));
2780 if (!sdp_filename) {
2781 printf("SDP:\n%s\n", sdp);
2784 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
/* Write once, then free the filename so the SDP is not emitted again. */
2787 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788 avio_closep(&sdp_pb);
2789 av_freep(&sdp_filename);
2797 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2800 for (i = 0; hwaccels[i].name; i++)
2801 if (hwaccels[i].pix_fmt == pix_fmt)
2802 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first hwaccel format matching the user's selection,
 * initializing that hwaccel as a side effect.
 * NOTE(review): excerpt omits some original lines (continue/break and the
 * final software-format fallback); comments describe only visible code.
 */
2806 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2808 InputStream *ist = s->opaque;
2809 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2812 for (p = pix_fmts; *p != -1; p++) {
2813 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2814 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are considered here. */
2816 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Skip hwaccels that don't match the active or user-requested one. */
2819 hwaccel = get_hwaccel(*p);
2821 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2822 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2825 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2827 if (ist->hwaccel_id == hwaccel->id) {
2828 av_log(NULL, AV_LOG_FATAL,
2829 "%s hwaccel requested for input stream #%d:%d, "
2830 "but cannot be initialized.\n", hwaccel->name,
2831 ist->file_index, ist->st->index);
2832 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hardware frames context to the decoder. */
2837 if (ist->hw_frames_ctx) {
2838 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2839 if (!s->hw_frames_ctx)
2840 return AV_PIX_FMT_NONE;
/* Remember which hwaccel/pix_fmt is now active for this stream. */
2843 ist->active_hwaccel_id = hwaccel->id;
2844 ist->hwaccel_pix_fmt = *p;
2851 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2853 InputStream *ist = s->opaque;
2855 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2856 return ist->hwaccel_get_buffer(s, frame, flags);
2858 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed),
 * installing the ffmpeg.c callbacks, per-codec option tweaks and the
 * packet time base; resets next_pts/next_dts at the end.
 * On failure a message is written into 'error' and a negative AVERROR
 * is returned.
 * NOTE(review): excerpt omits some original lines (braces, returns).
 */
2861 static int init_input_stream(int ist_index, char *error, int error_len)
2864 InputStream *ist = input_streams[ist_index];
2866 if (ist->decoding_needed) {
2867 AVCodec *codec = ist->dec;
2869 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2870 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2871 return AVERROR(EINVAL);
/* Route the decoder callbacks through this InputStream via 'opaque'. */
2874 ist->dec_ctx->opaque = ist;
2875 ist->dec_ctx->get_format = get_format;
2876 ist->dec_ctx->get_buffer2 = get_buffer;
2877 ist->dec_ctx->thread_safe_callbacks = 1;
2879 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles going to an encoder need end-display-time computation. */
2880 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2881 (ist->decoding_needed & DECODING_FOR_OST)) {
2882 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2883 if (ist->decoding_needed & DECODING_FOR_FILTER)
2884 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n")
;
2887 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2889 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2890 * audio, and video decoders such as cuvid or mediacodec */
2891 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2893 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2894 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2895 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2896 if (ret == AVERROR_EXPERIMENTAL)
2897 abort_codec_experimental(codec, 0);
2899 snprintf(error, error_len,
2900 "Error while opening decoder for input stream "
2902 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are reported as errors. */
2905 assert_avoptions(ist->decoder_opts);
2908 ist->next_pts = AV_NOPTS_VALUE;
2909 ist->next_dts = AV_NOPTS_VALUE;
2914 static InputStream *get_input_stream(OutputStream *ost)
2916 if (ost->source_index >= 0)
2917 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: three-way compare that avoids
 * the overflow a plain subtraction could produce.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Write the muxer header for an output file once every one of its streams
 * has been initialized, then flush packets that were queued while the
 * muxer was not yet open.
 * NOTE(review): excerpt omits some original lines (returns, braces).
 */
2926 /* open the muxer when all the streams are initialized */
2927 static int check_init_output_file(OutputFile *of, int file_index)
/* Not all streams ready yet: nothing to do. */
2931 for (i = 0; i < of->ctx->nb_streams; i++) {
2932 OutputStream *ost = output_streams[of->ost_index + i];
2933 if (!ost->initialized)
2937 of->ctx->interrupt_callback = int_cb;
2939 ret = avformat_write_header(of->ctx, &of->opts);
2941 av_log(NULL, AV_LOG_ERROR,
2942 "Could not write header for output file #%d "
2943 "(incorrect codec parameters ?): %s\n",
2944 file_index, av_err2str(ret));
2947 //assert_avoptions(of->opts);
2948 of->header_written = 1;
2950 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2952 if (sdp_filename || want_sdp)
2955 /* flush the muxing queues */
2956 for (i = 0; i < of->ctx->nb_streams; i++) {
2957 OutputStream *ost = output_streams[of->ost_index + i];
2959 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2960 if (!av_fifo_size(ost->muxing_queue))
2961 ost->mux_timebase = ost->st->time_base;
/* Drain packets buffered before the muxer was opened. */
2963 while (av_fifo_size(ost->muxing_queue)) {
2965 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2966 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, then copy
 * the last filter's output parameters back into the stream.
 * NOTE(review): excerpt omits some original lines (returns, braces).
 */
2973 static int init_output_bsfs(OutputStream *ost)
2978 if (!ost->nb_bitstream_filters)
2981 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2982 ctx = ost->bsf_ctx[i];
/* Each filter's input comes from the previous filter's output, or from
 * the stream itself for the first filter. */
2984 ret = avcodec_parameters_copy(ctx->par_in,
2985 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2989 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2991 ret = av_bsf_init(ctx);
2993 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2994 ost->bsf_ctx[i]->filter->name);
/* The chain's final output parameters become the stream's parameters. */
2999 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3000 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3004 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in stream-copy mode: copy the input stream's
 * codec parameters (with codec-tag reconciliation for the target muxer),
 * timing information, disposition and side data, plus per-codec-type
 * fixups. No encoder is opened.
 * NOTE(review): excerpt omits some original lines (returns, braces).
 */
3009 static int init_output_stream_streamcopy(OutputStream *ost)
3011 OutputFile *of = output_files[ost->file_index];
3012 InputStream *ist = get_input_stream(ost);
3013 AVCodecParameters *par_dst = ost->st->codecpar;
3014 AVCodecParameters *par_src = ost->ref_par;
3017 uint32_t codec_tag = par_dst->codec_tag;
3019 av_assert0(ist && !ost->filter);
/* Pass input parameters through enc_ctx so user codec options can apply. */
3021 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3023 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3025 av_log(NULL, AV_LOG_FATAL,
3026 "Error setting up codec context options.\n");
3029 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the target muxer can represent it. */
3032 unsigned int codec_tag_tmp;
3033 if (!of->ctx->oformat->codec_tag ||
3034 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3035 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3036 codec_tag = par_src->codec_tag;
3039 ret = avcodec_parameters_copy(par_dst, par_src);
3043 par_dst->codec_tag = codec_tag;
3045 if (!ost->frame_rate.num)
3046 ost->frame_rate = ist->framerate;
3047 ost->st->avg_frame_rate = ost->frame_rate;
3049 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3053 // copy timebase while removing common factors
3054 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3055 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3057 // copy estimated duration as a hint to the muxer
3058 if (ost->st->duration <= 0 && ist->st->duration > 0)
3059 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3062 ost->st->disposition = ist->st->disposition;
/* Copy all packet side data from the input stream to the output stream. */
3064 if (ist->st->nb_side_data) {
3065 for (i = 0; i < ist->st->nb_side_data; i++) {
3066 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3069 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3071 return AVERROR(ENOMEM);
3072 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user rotation override replaces the display matrix side data. */
3076 if (ost->rotate_overridden) {
3077 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3078 sizeof(int32_t) * 9);
3080 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3083 ost->parser = av_parser_init(par_dst->codec_id);
3084 ost->parser_avctx = avcodec_alloc_context3(NULL);
3085 if (!ost->parser_avctx)
3086 return AVERROR(ENOMEM);
3088 switch (par_dst->codec_type) {
3089 case AVMEDIA_TYPE_AUDIO:
3090 if (audio_volume != 256) {
3091 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Clear bogus block_align values for MP3/AC3 so muxers don't trust them. */
3094 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3095 par_dst->block_align= 0;
3096 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3097 par_dst->block_align= 0;
3099 case AVMEDIA_TYPE_VIDEO:
3100 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3102 av_mul_q(ost->frame_aspect_ratio,
3103 (AVRational){ par_dst->height, par_dst->width });
3104 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3105 "with stream copy may produce invalid files\n");
3107 else if (ist->st->sample_aspect_ratio.num)
3108 sar = ist->st->sample_aspect_ratio;
3110 sar = par_src->sample_aspect_ratio;
3111 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3112 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3113 ost->st->r_frame_rate = ist->st->r_frame_rate;
3117 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to the libavcodec
 * ident plus the encoder name, unless the user already set one or
 * bitexact mode is active (then only "Lavc <name>" is written).
 * NOTE(review): excerpt omits some original lines (returns, braces).
 */
3122 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3124 AVDictionaryEntry *e;
3126 uint8_t *encoder_string;
3127 int encoder_string_len;
3128 int format_flags = 0;
3129 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
3131 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
3134 e = av_dict_get(of->opts, "fflags", NULL, 0);
3136 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3139 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3141 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3143 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3146 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3149 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3150 encoder_string = av_mallocz(encoder_string_len);
3151 if (!encoder_string)
/* Full version ident only when not in bitexact mode. */
3154 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3155 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3157 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3158 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3159 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3160 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse a -force_key_frames specification (comma-separated times, with a
 * "chapters[+offset]" shorthand that expands to all chapter start times)
 * into a sorted array of timestamps in the encoder's time base, stored in
 * ost->forced_kf_pts / ost->forced_kf_count.
 * NOTE(review): excerpt omits some original lines (loop bookkeeping,
 * braces); comments describe only the visible code.
 */
3163 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3164 AVCodecContext *avctx)
3167 int n = 1, i, size, index = 0;
/* First pass: count entries (n starts at 1, incremented per separator). */
3170 for (p = kf; *p; p++)
3174 pts = av_malloc_array(size, sizeof(*pts));
3176 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3181 for (i = 0; i < n; i++) {
3182 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one keyframe per chapter start. */
3187 if (!memcmp(p, "chapters", 8)) {
3189 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1), guarding against int overflow. */
3192 if (avf->nb_chapters > INT_MAX - size ||
3193 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3195 av_log(NULL, AV_LOG_FATAL,
3196 "Could not allocate forced key frames array.\n");
3199 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3200 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3202 for (j = 0; j < avf->nb_chapters; j++) {
3203 AVChapter *c = avf->chapters[j];
3204 av_assert1(index < size);
3205 pts[index++] = av_rescale_q(c->start, c->time_base,
3206 avctx->time_base) + t;
/* Plain timestamp entry, rescaled into the encoder time base. */
3211 t = parse_time_or_die("force_key_frames", p, 1);
3212 av_assert1(index < size);
3213 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder loop can scan it linearly. */
3220 av_assert0(index == size);
3221 qsort(pts, size, sizeof(*pts), compare_int64);
3222 ost->forced_kf_count = size;
3223 ost->forced_kf_pts = pts;
/*
 * Configure the encoder context of an output stream before opening it:
 * disposition, frame rate selection, and per-media-type parameters taken
 * from the filtergraph sink (sample rate/format, dimensions, pixel
 * format, time base, aspect ratio), plus forced-keyframe parsing.
 * NOTE(review): excerpt omits some original lines (braces, defaults);
 * comments describe only the visible code.
 */
3226 static int init_output_stream_encode(OutputStream *ost)
3228 InputStream *ist = get_input_stream(ost);
3229 AVCodecContext *enc_ctx = ost->enc_ctx;
3230 AVCodecContext *dec_ctx = NULL;
3231 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3234 set_encoder_id(output_files[ost->file_index], ost);
3236 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3237 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3238 // which have to be filtered out to prevent leaking them to output files.
3239 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3242 ost->st->disposition = ist->st->disposition;
3244 dec_ctx = ist->dec_ctx;
3246 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark it as
 * the default audio/video stream. */
3248 for (j = 0; j < oc->nb_streams; j++) {
3249 AVStream *st = oc->streams[j];
3250 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3253 if (j == oc->nb_streams)
3254 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3255 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3256 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame-rate selection: -r, filter sink, input -r, input r_frame_rate,
 * then a warned 25 fps fallback, in that order. */
3259 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3260 if (!ost->frame_rate.num)
3261 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3262 if (ist && !ost->frame_rate.num)
3263 ost->frame_rate = ist->framerate;
3264 if (ist && !ost->frame_rate.num)
3265 ost->frame_rate = ist->st->r_frame_rate;
3266 if (ist && !ost->frame_rate.num) {
3267 ost->frame_rate = (AVRational){25, 1};
3268 av_log(NULL, AV_LOG_WARNING,
3270 "about the input framerate is available. Falling "
3271 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3272 "if you want a different framerate.\n",
3273 ost->file_index, ost->index);
3275 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports (unless -force_fps). */
3276 if (ost->enc->supported_framerates && !ost->force_fps) {
3277 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3278 ost->frame_rate = ost->enc->supported_framerates[idx];
3280 // reduce frame rate for mpeg4 to be within the spec limits
3281 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3282 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3283 ost->frame_rate.num, ost->frame_rate.den, 65535);
3287 switch (enc_ctx->codec_type) {
/* Audio parameters come from the buffersink of the filtergraph. */
3288 case AVMEDIA_TYPE_AUDIO:
3289 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3291 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3292 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3293 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3294 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3295 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3296 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3298 case AVMEDIA_TYPE_VIDEO:
/* Time base is 1/framerate, falling back to the filter sink's value. */
3299 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3300 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3301 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3302 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3303 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3304 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3305 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale pre-parsed forced keyframe times into the final time base. */
3307 for (j = 0; j < ost->forced_kf_count; j++)
3308 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3310 enc_ctx->time_base);
3312 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3313 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3314 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3315 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3316 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3317 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Warn about implicitly chosen non-yuv420p pixel formats for codecs
 * where player compatibility commonly matters. */
3318 if (!strncmp(ost->enc->name, "libx264", 7) &&
3319 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3320 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3321 av_log(NULL, AV_LOG_WARNING,
3322 "No pixel format specified, %s for H.264 encoding chosen.\n"
3323 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3324 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3325 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3326 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3327 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3328 av_log(NULL, AV_LOG_WARNING,
3329 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3330 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3331 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3332 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3334 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3335 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3337 enc_ctx->framerate = ost->frame_rate;
3339 ost->st->avg_frame_rate = ost->frame_rate;
/* When geometry/format changes relative to the decoder, the raw-sample
 * bit depth hint is taken from the CLI instead. */
3342 enc_ctx->width != dec_ctx->width ||
3343 enc_ctx->height != dec_ctx->height ||
3344 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3345 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:"), "source", or a
 * static list of timestamps parsed by parse_forced_key_frames(). */
3348 if (ost->forced_keyframes) {
3349 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3350 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3351 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3353 av_log(NULL, AV_LOG_ERROR,
3354 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3357 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3358 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3359 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3360 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3362 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3363 // parse it only for static kf timings
3364 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3365 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3369 case AVMEDIA_TYPE_SUBTITLE:
3370 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Inherit subtitle canvas size from the input when unset. */
3371 if (!enc_ctx->width) {
3372 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3373 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3376 case AVMEDIA_TYPE_DATA:
3383 ost->mux_timebase = enc_ctx->time_base;
3388 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3392 if (ost->encoding_needed) {
3393 AVCodec *codec = ost->enc;
3394 AVCodecContext *dec = NULL;
3397 ret = init_output_stream_encode(ost);
3401 if ((ist = get_input_stream(ost)))
3403 if (dec && dec->subtitle_header) {
3404 /* ASS code assumes this buffer is null terminated so add extra byte. */
3405 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3406 if (!ost->enc_ctx->subtitle_header)
3407 return AVERROR(ENOMEM);
3408 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3409 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3411 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3412 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3413 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3415 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3416 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3417 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3419 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3420 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3421 av_buffersink_get_format(ost->filter->filter)) {
3422 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3423 if (!ost->enc_ctx->hw_frames_ctx)
3424 return AVERROR(ENOMEM);
3427 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3428 if (ret == AVERROR_EXPERIMENTAL)
3429 abort_codec_experimental(codec, 1);
3430 snprintf(error, error_len,
3431 "Error while opening encoder for output stream #%d:%d - "
3432 "maybe incorrect parameters such as bit_rate, rate, width or height",
3433 ost->file_index, ost->index);
3436 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3437 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3438 av_buffersink_set_frame_size(ost->filter->filter,
3439 ost->enc_ctx->frame_size);
3440 assert_avoptions(ost->encoder_opts);
3441 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3442 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3443 " It takes bits/s as argument, not kbits/s\n");
3445 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3447 av_log(NULL, AV_LOG_FATAL,
3448 "Error initializing the output stream codec context.\n");
3452 * FIXME: ost->st->codec should't be needed here anymore.
3454 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3458 if (ost->enc_ctx->nb_coded_side_data) {
3461 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3462 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3465 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3467 return AVERROR(ENOMEM);
3468 memcpy(dst_data, sd_src->data, sd_src->size);
3473 * Add global input side data. For now this is naive, and copies it
3474 * from the input stream's global side data. All side data should
3475 * really be funneled over AVFrame and libavfilter, then added back to
3476 * packet side data, and then potentially using the first packet for
3481 for (i = 0; i < ist->st->nb_side_data; i++) {
3482 AVPacketSideData *sd = &ist->st->side_data[i];
3483 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3485 return AVERROR(ENOMEM);
3486 memcpy(dst, sd->data, sd->size);
3487 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3488 av_display_rotation_set((uint32_t *)dst, 0);
3492 // copy timebase while removing common factors
3493 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3494 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3496 // copy estimated duration as a hint to the muxer
3497 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3498 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3500 ost->st->codec->codec= ost->enc_ctx->codec;
3501 } else if (ost->stream_copy) {
3502 ret = init_output_stream_streamcopy(ost);
3507 * FIXME: will the codec context used by the parser during streamcopy
3508 * This should go away with the new parser API.
3510 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3515 // parse user provided disposition, and update stream values
3516 if (ost->disposition) {
3517 static const AVOption opts[] = {
3518 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3519 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3520 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3521 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3522 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3523 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3524 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3525 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3526 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3527 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3528 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3529 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3530 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3531 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3534 static const AVClass class = {
3536 .item_name = av_default_item_name,
3538 .version = LIBAVUTIL_VERSION_INT,
3540 const AVClass *pclass = &class;
3542 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3547 /* initialize bitstream filters for the output stream
3548 * needs to be done here, because the codec id for streamcopy is not
3549 * known until now */
3550 ret = init_output_bsfs(ost);
3554 ost->initialized = 1;
3556 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) about an input stream that appeared
 * dynamically after the initial probe; such streams are ignored by the
 * demux loop. NOTE(review): the original line numbering is non-contiguous
 * here, so some lines (e.g. the early return after the index check) are
 * elided from this excerpt. */
3563 static void report_new_stream(int input_index, AVPacket *pkt)
3565 InputFile *file = input_files[input_index];
3566 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream index (or a higher one) — stay quiet */
3568 if (pkt->stream_index < file->nb_streams_warn)
3570 av_log(file->ctx, AV_LOG_WARNING,
3571 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3572 av_get_media_type_string(st->codecpar->codec_type),
3573 input_index, pkt->stream_index,
3574 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index we have warned about */
3575 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop:
 *  - bind complex-filtergraph outputs to a source input stream,
 *  - arm framerate emulation timers,
 *  - open all decoders, then all encoders not fed from filtergraphs,
 *  - discard programs with no used streams,
 *  - write headers for stream-less output files,
 *  - print the stream mapping.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): original line numbering is non-contiguous — some braces,
 * continues and error-handling lines are elided from this excerpt. */
3578 static int transcode_init(void)
3580 int ret = 0, i, j, k;
3581 AVFormatContext *oc;
3584 char error[1024] = {0};
/* attach a source_index to complex-graph outputs that do not have one;
 * only possible when the graph has exactly one input */
3586 for (i = 0; i < nb_filtergraphs; i++) {
3587 FilterGraph *fg = filtergraphs[i];
3588 for (j = 0; j < fg->nb_outputs; j++) {
3589 OutputFilter *ofilter = fg->outputs[j];
3590 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3592 if (fg->nb_inputs != 1)
3594 for (k = nb_input_streams-1; k >= 0 ; k--)
3595 if (fg->inputs[0]->ist == input_streams[k])
3597 ofilter->ost->source_index = k;
3601 /* init framerate emulation */
3602 for (i = 0; i < nb_input_files; i++) {
3603 InputFile *ifile = input_files[i];
3604 if (ifile->rate_emu)
3605 for (j = 0; j < ifile->nb_streams; j++)
3606 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3609 /* init input streams */
3610 for (i = 0; i < nb_input_streams; i++)
3611 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on decoder-open failure, close any encoders opened so far */
3612 for (i = 0; i < nb_output_streams; i++) {
3613 ost = output_streams[i];
3614 avcodec_close(ost->enc_ctx);
3619 /* open each encoder */
3620 for (i = 0; i < nb_output_streams; i++) {
3621 // skip streams fed from filtergraphs until we have a frame for them
3622 if (output_streams[i]->filter)
3625 ret = init_output_stream(output_streams[i], error, sizeof(error));
3630 /* discard unused programs */
3631 for (i = 0; i < nb_input_files; i++) {
3632 InputFile *ifile = input_files[i];
3633 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3634 AVProgram *p = ifile->ctx->programs[j];
3635 int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is used */
3637 for (k = 0; k < p->nb_stream_indexes; k++)
3638 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3639 discard = AVDISCARD_DEFAULT;
3642 p->discard = discard;
3646 /* write headers for files with no streams */
3647 for (i = 0; i < nb_output_files; i++) {
3648 oc = output_files[i]->ctx;
3649 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3650 ret = check_init_output_file(output_files[i], i);
3657 /* dump the stream mapping */
3658 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3659 for (i = 0; i < nb_input_streams; i++) {
3660 ist = input_streams[i];
3662 for (j = 0; j < ist->nb_filters; j++) {
3663 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3664 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3665 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3666 ist->filters[j]->name);
3667 if (nb_filtergraphs > 1)
3668 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3669 av_log(NULL, AV_LOG_INFO, "\n");
3674 for (i = 0; i < nb_output_streams; i++) {
3675 ost = output_streams[i];
3677 if (ost->attachment_filename) {
3678 /* an attached file */
3679 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3680 ost->attachment_filename, ost->file_index, ost->index);
3684 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3685 /* output from a complex graph */
3686 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3687 if (nb_filtergraphs > 1)
3688 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3690 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3691 ost->index, ost->enc ? ost->enc->name : "?");
3695 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3696 input_streams[ost->source_index]->file_index,
3697 input_streams[ost->source_index]->st->index,
3700 if (ost->sync_ist != input_streams[ost->source_index])
3701 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3702 ost->sync_ist->file_index,
3703 ost->sync_ist->st->index);
3704 if (ost->stream_copy)
3705 av_log(NULL, AV_LOG_INFO, " (copy)");
3707 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3708 const AVCodec *out_codec = ost->enc;
3709 const char *decoder_name = "?";
3710 const char *in_codec_name = "?";
3711 const char *encoder_name = "?";
3712 const char *out_codec_name = "?";
3713 const AVCodecDescriptor *desc;
/* print "native" when the (de|en)coder name equals the codec name */
3716 decoder_name = in_codec->name;
3717 desc = avcodec_descriptor_get(in_codec->id);
3719 in_codec_name = desc->name;
3720 if (!strcmp(decoder_name, in_codec_name))
3721 decoder_name = "native";
3725 encoder_name = out_codec->name;
3726 desc = avcodec_descriptor_get(out_codec->id);
3728 out_codec_name = desc->name;
3729 if (!strcmp(encoder_name, out_codec_name))
3730 encoder_name = "native";
3733 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3734 in_codec_name, decoder_name,
3735 out_codec_name, encoder_name);
3737 av_log(NULL, AV_LOG_INFO, "\n");
3741 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* signal other threads/signal handlers that init has completed */
3745 atomic_store(&transcode_init_done, 1);
3750 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3751 static int need_output(void)
3755 for (i = 0; i < nb_output_streams; i++) {
3756 OutputStream *ost = output_streams[i];
3757 OutputFile *of = output_files[ost->file_index];
3758 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* stream finished, or its file hit the -fs size limit: skip it */
3760 if (ost->finished ||
3761 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file */
3763 if (ost->frame_number >= ost->max_frames) {
3765 for (j = 0; j < of->ctx->nb_streams; j++)
3766 close_output_stream(output_streams[of->ost_index + j]);
3777 * Select the output stream to process.
3779 * @return selected output stream, or NULL if none available
3781 static OutputStream *choose_output(void)
/* pick the unfinished stream whose muxed DTS (rescaled to a common time
 * base) is furthest behind; NULL result means "temporarily unavailable" */
3784 int64_t opts_min = INT64_MAX;
3785 OutputStream *ost_min = NULL;
3787 for (i = 0; i < nb_output_streams; i++) {
3788 OutputStream *ost = output_streams[i];
/* unknown cur_dts sorts first (INT64_MIN) so new streams get serviced */
3789 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3790 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3792 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3793 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* streams not yet initialized (and not done) take priority — presumably
 * returned immediately; the return is on an elided line, TODO confirm */
3795 if (!ost->initialized && !ost->inputs_done)
3798 if (!ost->finished && opts < opts_min) {
3800 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing when stdin is not a tty. */
3806 static void set_tty_echo(int on)
3810 if (tcgetattr(0, &tty) == 0) {
3811 if (on) tty.c_lflag |= ECHO;
3812 else tty.c_lflag &= ~ECHO;
3813 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for interactive single-key commands (at most every 100ms):
 * q quits, +/- change log verbosity, s toggles the QP histogram,
 * c/C send or queue filtergraph commands, d/D cycle debug flags,
 * ? / h print help. Returns AVERROR_EXIT to request termination,
 * 0 otherwise. NOTE(review): original line numbering is non-contiguous;
 * some lines (e.g. the read_key() call and 'q' handling) are elided. */
3818 static int check_keyboard_interaction(int64_t cur_time)
3821 static int64_t last_time;
3822 if (received_nb_signals)
3823 return AVERROR_EXIT;
3824 /* read_key() returns 0 on EOF */
3825 if(cur_time - last_time >= 100000 && !run_as_daemon){
3827 last_time = cur_time;
3831 return AVERROR_EXIT;
3832 if (key == '+') av_log_set_level(av_log_get_level()+10);
3833 if (key == '-') av_log_set_level(av_log_get_level()-10);
3834 if (key == 's') qp_hist ^= 1;
3837 do_hex_dump = do_pkt_dump = 0;
3838 } else if(do_pkt_dump){
3842 av_log_set_level(AV_LOG_DEBUG);
/* interactive filter command: read "<target> <time> <command> [<arg>]" */
3844 if (key == 'c' || key == 'C'){
3845 char buf[4096], target[64], command[256], arg[256] = {0};
3848 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3851 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3856 fprintf(stderr, "\n");
3858 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3859 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3860 target, time, command, arg);
3861 for (i = 0; i < nb_filtergraphs; i++) {
3862 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter, 'C' to all of them */
3865 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3866 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3867 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3868 } else if (key == 'c') {
3869 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3870 ret = AVERROR_PATCHWELCOME;
3872 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3874 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3879 av_log(NULL, AV_LOG_ERROR,
3880 "Parse error, at least 3 arguments were expected, "
3881 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through supported debug modes; 'D' prompts for a value */
3884 if (key == 'd' || key == 'D'){
3887 debug = input_streams[0]->st->codec->debug<<1;
3888 if(!debug) debug = 1;
3889 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3896 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3901 fprintf(stderr, "\n");
3902 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3903 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to all codec contexts */
3905 for(i=0;i<nb_input_streams;i++) {
3906 input_streams[i]->st->codec->debug = debug;
3908 for(i=0;i<nb_output_streams;i++) {
3909 OutputStream *ost = output_streams[i];
3910 ost->enc_ctx->debug = debug;
3912 if(debug) av_log_set_level(AV_LOG_DEBUG);
3913 fprintf(stderr,"debug=%d\n", debug);
3916 fprintf(stderr, "key function\n"
3917 "? show this help\n"
3918 "+ increase verbosity\n"
3919 "- decrease verbosity\n"
3920 "c Send command to first matching filter supporting it\n"
3921 "C Send/Queue command to all matching filters\n"
3922 "D cycle through available debug modes\n"
3923 "h dump packets/hex press to cycle through the 3 states\n"
3925 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On read
 * error/EOF the error is stored on the queue's receive side; a full
 * non-blocking queue triggers a blocking retry plus a warning to raise
 * -thread_queue_size. NOTE(review): some lines (loop header, the 'arg'
 * cast to InputFile*) are elided from this excerpt. */
3932 static void *input_thread(void *arg)
3935 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3940 ret = av_read_frame(f->ctx, &pkt);
3942 if (ret == AVERROR(EAGAIN)) {
/* demuxer failed or hit EOF: propagate to the receiving side and stop */
3947 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3950 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: retry blocking and warn the user */
3951 if (flags && ret == AVERROR(EAGAIN)) {
3953 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3954 av_log(f->ctx, AV_LOG_WARNING,
3955 "Thread message queue blocking; consider raising the "
3956 "thread_queue_size option (current value: %d)\n",
3957 f->thread_queue_size);
3960 if (ret != AVERROR_EOF)
3961 av_log(f->ctx, AV_LOG_ERROR,
3962 "Unable to send packet to main thread: %s\n",
/* send failed: the packet was not consumed, so unref it here */
3964 av_packet_unref(&pkt);
3965 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads: signal EOF on the send side,
 * drain and unref any queued packets, join the thread, free the queue. */
3973 static void free_input_threads(void)
3977 for (i = 0; i < nb_input_files; i++) {
3978 InputFile *f = input_files[i];
3981 if (!f || !f->in_thread_queue)
3983 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so the input thread can exit its send call */
3984 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3985 av_packet_unref(&pkt);
3987 pthread_join(f->thread, NULL);
3989 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread (plus message queue) per input file; only
 * used with multiple inputs — a single input is read directly. Returns 0
 * on success or a negative AVERROR. */
3993 static int init_input_threads(void)
3997 if (nb_input_files == 1)
4000 for (i = 0; i < nb_input_files; i++) {
4001 InputFile *f = input_files[i];
/* non-seekable real-time sources (except lavfi) must not block the queue */
4003 if (f->ctx->pb ? !f->ctx->pb->seekable :
4004 strcmp(f->ctx->iformat->name, "lavfi"))
4005 f->non_blocking = 1;
4006 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4007 f->thread_queue_size, sizeof(AVPacket));
4011 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4012 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4013 av_thread_message_queue_free(&f->in_thread_queue);
4014 return AVERROR(ret);
/* Receive one packet from an input file's demuxer-thread queue
 * (non-blocking for real-time inputs; condition line elided here). */
4020 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4022 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4024 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file, honoring -re rate emulation:
 * if any of the file's streams is ahead of wall-clock time, return
 * EAGAIN instead of reading. Multi-input builds read via the per-file
 * thread queue; single input reads the demuxer directly. */
4028 static int get_input_packet(InputFile *f, AVPacket *pkt)
4032 for (i = 0; i < f->nb_streams; i++) {
4033 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream DTS (in microseconds) against elapsed wall-clock time */
4034 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4035 int64_t now = av_gettime_relative() - ist->start;
4037 return AVERROR(EAGAIN);
4042 if (nb_input_files > 1)
4043 return get_input_packet_mt(f, pkt);
4045 return av_read_frame(f->ctx, pkt);
/* Return nonzero iff some output stream is currently marked unavailable
 * (i.e. its input recently returned EAGAIN). */
4048 static int got_eagain(void)
4051 for (i = 0; i < nb_output_streams; i++)
4052 if (output_streams[i]->unavailable)
/* Clear all EAGAIN markers on input files and output streams so the
 * main loop retries every source. */
4057 static void reset_eagain(void)
4060 for (i = 0; i < nb_input_files; i++)
4061 input_files[i]->eagain = 0;
4062 for (i = 0; i < nb_output_streams; i++)
4063 output_streams[i]->unavailable = 0;
4066 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4067 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4068 AVRational time_base)
4074 return tmp_time_base;
/* compare the stored duration against the candidate across time bases */
4077 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4080 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush
 * decoders, then (re)compute the file's loop duration from per-stream
 * max_pts-min_pts plus the estimated last-frame duration. Audio streams
 * take precedence for the duration estimate since a video last-frame
 * length is not well defined. NOTE(review): original line numbering is
 * non-contiguous; some control-flow lines are elided. */
4086 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4089 AVCodecContext *avctx;
4090 int i, ret, has_audio = 0;
4091 int64_t duration = 0;
4093 ret = av_seek_frame(is, -1, is->start_time, 0);
4097 for (i = 0; i < ifile->nb_streams; i++) {
4098 ist = input_streams[ifile->ist_index + i];
4099 avctx = ist->dec_ctx;
/* flush decoders so no stale frames leak across the loop boundary */
4102 if (ist->decoding_needed) {
4103 process_input_packet(ist, NULL, 1);
4104 avcodec_flush_buffers(avctx);
4107 /* duration is the length of the last frame in a stream
4108 * when audio stream is present we don't care about
4109 * last video frame length because it's not defined exactly */
4110 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4114 for (i = 0; i < ifile->nb_streams; i++) {
4115 ist = input_streams[ifile->ist_index + i];
4116 avctx = ist->dec_ctx;
/* audio: last-frame duration derived from the decoded sample count */
4119 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4120 AVRational sample_rate = {1, avctx->sample_rate};
4122 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame at the forced or average frame rate, else 1 tick */
4126 if (ist->framerate.num) {
4127 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4128 } else if (ist->st->avg_frame_rate.num) {
4129 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4130 } else duration = 1;
4132 if (!ifile->duration)
4133 ifile->time_base = ist->st->time_base;
4134 /* the total duration of the stream, max_pts - min_pts is
4135 * the duration of the stream without the last frame */
4136 duration += ist->max_pts - ist->min_pts;
4137 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* decrement the remaining loop count when it is finite */
4141 if (ifile->loop > 0)
/* Demux and process one packet from input file file_index.
 * Return value:
4149 * - 0 -- one packet was read and processed
4150 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4151 * this function should be called again
4152 * - AVERROR_EOF -- this function should not be called again
 * NOTE(review): original line numbering is non-contiguous here — some
 * control-flow lines (returns, braces, if-headers) are elided. */
4154 static int process_input(int file_index)
4156 InputFile *ifile = input_files[file_index];
4157 AVFormatContext *is;
4165 ret = get_input_packet(ifile, &pkt);
4167 if (ret == AVERROR(EAGAIN)) {
/* read failed but -stream_loop is active: rewind and retry once */
4171 if (ret < 0 && ifile->loop) {
4172 if ((ret = seek_to_start(ifile, is)) < 0)
4174 ret = get_input_packet(ifile, &pkt);
4175 if (ret == AVERROR(EAGAIN)) {
4181 if (ret != AVERROR_EOF) {
4182 print_error(is->filename, ret);
/* EOF: flush every decoder of this file and finish direct outputs */
4187 for (i = 0; i < ifile->nb_streams; i++) {
4188 ist = input_streams[ifile->ist_index + i];
4189 if (ist->decoding_needed) {
4190 ret = process_input_packet(ist, NULL, 0);
4195 /* mark all outputs that don't go through lavfi as finished */
4196 for (j = 0; j < nb_output_streams; j++) {
4197 OutputStream *ost = output_streams[j];
4199 if (ost->source_index == ifile->ist_index + i &&
4200 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4201 finish_output_stream(ost);
4205 ifile->eof_reached = 1;
4206 return AVERROR(EAGAIN);
4212 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4213 is->streams[pkt.stream_index]);
4215 /* the following test is needed in case new streams appear
4216 dynamically in stream : we ignore them */
4217 if (pkt.stream_index >= ifile->nb_streams) {
4218 report_new_stream(file_index, &pkt);
4219 goto discard_packet;
4222 ist = input_streams[ifile->ist_index + pkt.stream_index];
4224 ist->data_size += pkt.size;
4228 goto discard_packet;
4230 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4231 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4236 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4237 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4238 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4239 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4240 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4241 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4242 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4243 av_ts2str(input_files[ist->file_index]->ts_offset),
4244 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* one-time timestamp-wrap correction for streams with <64 pts_wrap_bits */
4247 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4248 int64_t stime, stime2;
4249 // Correcting starttime based on the enabled streams
4250 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4251 // so we instead do it here as part of discontinuity handling
4252 if ( ist->next_dts == AV_NOPTS_VALUE
4253 && ifile->ts_offset == -is->start_time
4254 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4255 int64_t new_start_time = INT64_MAX;
4256 for (i=0; i<is->nb_streams; i++) {
4257 AVStream *st = is->streams[i];
4258 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4260 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4262 if (new_start_time > is->start_time) {
4263 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4264 ifile->ts_offset = -new_start_time;
4268 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4269 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4270 ist->wrap_correction_done = 1;
/* timestamps beyond half the wrap range are unwrapped by one period */
4272 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4273 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4274 ist->wrap_correction_done = 0;
4276 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4277 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4278 ist->wrap_correction_done = 0;
4282 /* add the stream-global side data to the first packet */
4283 if (ist->nb_packets == 1) {
4284 for (i = 0; i < ist->st->nb_side_data; i++) {
4285 AVPacketSideData *src_sd = &ist->st->side_data[i];
4288 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* do not overwrite side data the packet already carries */
4291 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4294 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4298 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file input timestamp offset (-itsoffset / start-time) */
4302 if (pkt.dts != AV_NOPTS_VALUE)
4303 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4304 if (pkt.pts != AV_NOPTS_VALUE)
4305 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4307 if (pkt.pts != AV_NOPTS_VALUE)
4308 pkt.pts *= ist->ts_scale;
4309 if (pkt.dts != AV_NOPTS_VALUE)
4310 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity detection against the file's last DTS */
4312 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4313 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4314 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4315 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4316 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4317 int64_t delta = pkt_dts - ifile->last_ts;
4318 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4319 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4320 ifile->ts_offset -= delta;
4321 av_log(NULL, AV_LOG_DEBUG,
4322 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4323 delta, ifile->ts_offset);
4324 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4325 if (pkt.pts != AV_NOPTS_VALUE)
4326 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration */
4330 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4331 if (pkt.pts != AV_NOPTS_VALUE) {
4332 pkt.pts += duration;
4333 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4334 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4337 if (pkt.dts != AV_NOPTS_VALUE)
4338 pkt.dts += duration;
/* per-stream discontinuity detection against the predicted next DTS */
4340 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4341 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4342 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4343 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4345 int64_t delta = pkt_dts - ist->next_dts;
4346 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4347 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4348 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4349 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4350 ifile->ts_offset -= delta;
4351 av_log(NULL, AV_LOG_DEBUG,
4352 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4353 delta, ifile->ts_offset);
4354 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4355 if (pkt.pts != AV_NOPTS_VALUE)
4356 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop timestamps beyond the error threshold */
4359 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4360 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4361 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4362 pkt.dts = AV_NOPTS_VALUE;
4364 if (pkt.pts != AV_NOPTS_VALUE){
4365 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4366 delta = pkt_pts - ist->next_dts;
4367 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4368 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4369 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4370 pkt.pts = AV_NOPTS_VALUE;
4376 if (pkt.dts != AV_NOPTS_VALUE)
4377 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4380 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4381 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4382 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4383 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4384 av_ts2str(input_files[ist->file_index]->ts_offset),
4385 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4388 sub2video_heartbeat(ist, pkt.pts);
4390 process_input_packet(ist, &pkt, 0);
4393 av_packet_unref(&pkt);
4399 * Perform a step of transcoding for the specified filter graph.
 *
4401 * @param[in] graph filter graph to consider
4402 * @param[out] best_ist input stream where a frame would allow to continue
4403 * @return 0 for success, <0 for error
 * NOTE(review): on EAGAIN from the graph, the input whose buffersrc has
 * the most failed requests is selected as *best_ist (assignment line is
 * elided in this excerpt — TODO confirm against full source). */
4405 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4408 int nb_requests, nb_requests_max = 0;
4409 InputFilter *ifilter;
4413 ret = avfilter_graph_request_oldest(graph->graph);
4415 return reap_filters(0);
/* graph fully flushed: reap remaining frames and close its outputs */
4417 if (ret == AVERROR_EOF) {
4418 ret = reap_filters(1);
4419 for (i = 0; i < graph->nb_outputs; i++)
4420 close_output_stream(graph->outputs[i]->ost);
4423 if (ret != AVERROR(EAGAIN))
/* graph is starved: find the hungriest input that can still deliver */
4426 for (i = 0; i < graph->nb_inputs; i++) {
4427 ifilter = graph->inputs[i];
4429 if (input_files[ist->file_index]->eagain ||
4430 input_files[ist->file_index]->eof_reached)
4432 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4433 if (nb_requests > nb_requests_max) {
4434 nb_requests_max = nb_requests;
/* no input can feed the graph right now: park all of its outputs */
4440 for (i = 0; i < graph->nb_outputs; i++)
4441 graph->outputs[i]->ost->unavailable = 1;
4447 * Run a single step of transcoding.
 *
4449 * @return 0 for success, <0 for error
4451 static int transcode_step(void)
4454 InputStream *ist = NULL;
4457 ost = choose_output();
4464 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtergraph not configured yet: configure it once all of its inputs
 * have known formats */
4468 if (ost->filter && !ost->filter->graph->graph) {
4469 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4470 ret = configure_filtergraph(ost->filter->graph);
4472 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* configured graph: let the graph decide which input stream to feed */
4478 if (ost->filter && ost->filter->graph->graph) {
4479 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4483 } else if (ost->filter) {
4485 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4486 InputFilter *ifilter = ost->filter->graph->inputs[i];
4487 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all inputs exhausted and graph never configured: mark stream done */
4493 ost->inputs_done = 1;
4497 av_assert0(ost->source_index >= 0);
4498 ist = input_streams[ost->source_index];
4501 ret = process_input(ist->file_index);
4502 if (ret == AVERROR(EAGAIN)) {
4503 if (input_files[ist->file_index]->eagain)
4504 ost->unavailable = 1;
4509 return ret == AVERROR_EOF ? 0 : ret;
4511 return reap_filters(0);
4515 * The following code is the main loop of the file converter
 *
 * Initializes everything, runs transcode_step() until no output is
 * needed or a signal/'q' arrives, then flushes decoders, writes
 * trailers, prints the final report and releases per-stream resources.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): original line numbering is non-contiguous — error
 * checks and some cleanup lines are elided from this excerpt.
4517 static int transcode(void)
4520 AVFormatContext *os;
4523 int64_t timer_start;
4524 int64_t total_packets_written = 0;
4526 ret = transcode_init();
4530 if (stdin_interaction) {
4531 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4534 timer_start = av_gettime_relative();
4537 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode step per iteration until stopped */
4541 while (!received_sigterm) {
4542 int64_t cur_time= av_gettime_relative();
4544 /* if 'q' pressed, exits */
4545 if (stdin_interaction)
4546 if (check_keyboard_interaction(cur_time) < 0)
4549 /* check if there's any stream where output is still needed */
4550 if (!need_output()) {
4551 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4555 ret = transcode_step();
4556 if (ret < 0 && ret != AVERROR_EOF) {
4558 av_strerror(ret, errbuf, sizeof(errbuf));
4560 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4564 /* dump report by using the output first video and audio streams */
4565 print_report(0, timer_start, cur_time);
4568 free_input_threads();
4571 /* at the end of stream, we must flush the decoder buffers */
4572 for (i = 0; i < nb_input_streams; i++) {
4573 ist = input_streams[i];
4574 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4575 process_input_packet(ist, NULL, 0);
4582 /* write the trailer if needed and close file */
4583 for (i = 0; i < nb_output_files; i++) {
4584 os = output_files[i]->ctx;
4585 if (!output_files[i]->header_written) {
4586 av_log(NULL, AV_LOG_ERROR,
4587 "Nothing was written into output file %d (%s), because "
4588 "at least one of its streams received no packets.\n",
4592 if ((ret = av_write_trailer(os)) < 0) {
4593 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4599 /* dump report by using the first video and audio streams */
4600 print_report(1, timer_start, av_gettime_relative());
4602 /* close each encoder */
4603 for (i = 0; i < nb_output_streams; i++) {
4604 ost = output_streams[i];
4605 if (ost->encoding_needed) {
4606 av_freep(&ost->enc_ctx->stats_in);
4608 total_packets_written += ost->packets_written;
/* -abort_on empty_output: treat a packet-less run as a fatal error */
4611 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4612 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4616 /* close each decoder */
4617 for (i = 0; i < nb_input_streams; i++) {
4618 ist = input_streams[i];
4619 if (ist->decoding_needed) {
4620 avcodec_close(ist->dec_ctx);
4621 if (ist->hwaccel_uninit)
4622 ist->hwaccel_uninit(ist->dec_ctx);
4626 av_buffer_unref(&hw_device_ctx);
/* error path / final cleanup: release per-output-stream allocations */
4633 free_input_threads();
4636 if (output_streams) {
4637 for (i = 0; i < nb_output_streams; i++) {
4638 ost = output_streams[i];
4641 if (fclose(ost->logfile))
4642 av_log(NULL, AV_LOG_ERROR,
4643 "Error closing logfile, loss of information possible: %s\n",
4644 av_err2str(AVERROR(errno)));
4645 ost->logfile = NULL;
4647 av_freep(&ost->forced_kf_pts);
4648 av_freep(&ost->apad);
4649 av_freep(&ost->disposition);
4650 av_dict_free(&ost->encoder_opts);
4651 av_dict_free(&ost->sws_dict);
4652 av_dict_free(&ost->swr_opts);
4653 av_dict_free(&ost->resample_opts);
/* Return this process's user CPU time in microseconds, using whichever
 * platform API is available (getrusage, GetProcessTimes), falling back
 * to wall-clock time. Used for the -benchmark report. */
4661 static int64_t getutime(void)
4664 struct rusage rusage;
4666 getrusage(RUSAGE_SELF, &rusage);
4667 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4668 #elif HAVE_GETPROCESSTIMES
4670 FILETIME c, e, k, u;
4671 proc = GetCurrentProcess();
4672 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds */
4673 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4675 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (maximum RSS on POSIX,
 * peak pagefile usage on Windows); used for the -benchmark report. */
4679 static int64_t getmaxrss(void)
4681 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4682 struct rusage rusage;
4683 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux — convert to bytes.
 * NOTE(review): on macOS ru_maxrss is already bytes; behavior here
 * follows upstream as written. */
4684 return (int64_t)rusage.ru_maxrss * 1024;
4685 #elif HAVE_GETPROCESSMEMORYINFO
4687 PROCESS_MEMORY_COUNTERS memcounters;
4688 proc = GetCurrentProcess();
4689 memcounters.cb = sizeof(memcounters);
4690 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4691 return memcounters.PeakPagefileUsage;
/**
 * No-op libav* log callback.
 *
 * Installed (via av_log_set_callback()) when ffmpeg is started with the
 * "-d" flag so that all library logging is discarded; every parameter is
 * intentionally ignored.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4701 int main(int argc, char **argv)
4708 register_exit(ffmpeg_cleanup);
4710 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4712 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4713 parse_loglevel(argc, argv, options);
4715 if(argc>1 && !strcmp(argv[1], "-d")){
4717 av_log_set_callback(log_callback_null);
4722 avcodec_register_all();
4724 avdevice_register_all();
4726 avfilter_register_all();
4728 avformat_network_init();
4730 show_banner(argc, argv, options);
4732 /* parse options and open all input/output files */
4733 ret = ffmpeg_parse_options(argc, argv);
4737 if (nb_output_files <= 0 && nb_input_files == 0) {
4739 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4743 /* file converter / grab */
4744 if (nb_output_files <= 0) {
4745 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4749 // if (nb_input_files == 0) {
4750 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4754 for (i = 0; i < nb_output_files; i++) {
4755 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4759 current_time = ti = getutime();
4760 if (transcode() < 0)
4762 ti = getutime() - ti;
4764 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4766 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4767 decode_error_stat[0], decode_error_stat[1]);
4768 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4771 exit_program(received_nb_signals ? 255 : main_return_code);
4772 return main_return_code;