2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* NOTE(review): this file appears to be a partial, garbled extraction of
 * FFmpeg's ffmpeg.c -- every line is prefixed with its original line
 * number and many intervening lines are missing.  Comments below describe
 * only what is visible. */
/* Program identity strings. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats). */
115 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions
 * (initializer truncated in this extraction). */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Run-wide state and statistics counters. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated lazily in do_subtitle_out(). */
142 static uint8_t *subtitle_out;
/* Global input/output file and stream tables; non-static, so visible to
 * the other ffmpeg_* translation units. */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
/* sub2video: render subtitle streams onto RGB32 video frames so filter
 * graphs can consume them (see original comment below). */
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank (all-zero) RGB32 canvas.  The
 * canvas size comes from the decoder, falling back to ist->sub2video.w/h
 * when the decoder reports no dimensions.
 * NOTE(review): the 'ret' declaration and return path are missing from
 * this extraction. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* request a 32-byte-aligned buffer for the canvas */
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * Rectangles that are not bitmaps, or that do not fit inside the w x h
 * canvas, are rejected with a warning. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left corner (4 bytes per RGB32 pixel) */
206 dst += r->y * dst_linesize + r->x * 4;
/* r->data[1] holds the palette: one 32-bit color per palette index */
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Push the current sub2video canvas, stamped with 'pts', into every filter
 * fed by this input stream.  KEEP_REF keeps ownership of the frame with
 * ist->sub2video so the same canvas can be re-sent later. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto the sub2video canvas and push it downstream.
 * When sub != NULL, its display window (start/end_display_time, ms offsets
 * from sub->pts) is rescaled into the stream time base; otherwise the
 * previously recorded end_pts is reused (blank/heartbeat frame). */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
/* draw every rectangle onto the fresh canvas, then hand it to the filters */
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame on every sibling subtitle stream of
 * this input file whose filters are starved, so video filters (e.g.
 * overlay) are never blocked waiting for a subtitle frame. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video streams */
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* count buffersrc underruns to decide whether a push is needed */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: emit a final clearing
 * frame if one is still pending, then signal EOF (NULL frame) to every
 * buffersrc fed by this stream. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Restore the terminal to its saved state.  Must stay async-signal-safe:
 * it is callable from a signal handler, so only safe calls belong here. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
/* NOTE(review): presumably forces av_log to flush/reset its line state
 * without printing anything -- confirm against av_log internals. */
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Termination state.  'volatile' because these are written from signal
 * handlers; transcode_init_done is atomic since it is also read by the
 * AVIO interrupt callback. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many have been
 * received; after more than three, report via write() -- which, unlike
 * printf, is async-signal-safe -- and hard-exit. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler.  For close/logoff/shutdown the process is hard-killed as
 * soon as this handler returns, so it stalls to give the main thread time
 * to clean up. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): fragment of term_init() -- the function header is missing
 * from this extraction.  When stdin interaction is wanted, the controlling
 * tty is switched into a raw-ish mode (no echo, no canonical input), and
 * the termination signal handlers are installed. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* NOTE(review): heavily truncated in this extraction.  The POSIX path
 * polls stdin with select(); the Windows path distinguishes console from
 * pipe input and probes pipes with PeekNamedPipe. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once enough signals have
 * arrived.  Before transcode init is done the threshold is 0 (any signal
 * interrupts); afterwards one signal requests a graceful shutdown and only
 * a second one interrupts blocking I/O. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
/* shared interrupt callback, handed to every AVFormatContext we open */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free filter graphs (draining their queued frames and
 * subtitles first), output files/streams, input files/streams, close the
 * vstats file and report how the run ended.  'ret' is the exit status
 * being propagated. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free filter graphs, draining any frames/subtitles still queued */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* close output files (AVFMT_NOFILE formats have no pb to close) */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
/* drop any packets still buffered in the muxing queue */
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
/* fclose() also flushes; failure here can mean lost vstats data */
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
/* final status line: signal exit vs. conversion failure */
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Iterates b with the empty-prefix + IGNORE_SUFFIX idiom, which visits
 * every entry. */
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Report any leftover (unconsumed) entry in m as a fatal unknown-option
 * error.  (The termination path is not visible in this extraction.) */
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* NOTE(review): body missing from this extraction -- signature only. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the CPU time elapsed since 'current_time' under
 * a printf-style label built from 'fmt'.
 * NOTE(review): the va_list/buffer declarations and the fmt==NULL handling
 * are missing from this extraction. */
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags are OR-ed into
 * ost itself, 'others' into all remaining streams. */
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: enforce the -frames limit, buffer the
 * packet while the muxer header is not yet written, rescale timestamps to
 * the stream time base, repair invalid/non-monotonic DTS, then hand the
 * packet to av_interleaved_write_frame().  'unqueue' marks packets that
 * were previously buffered so they are not counted twice. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
696 if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue, capped at -max_muxing_queue_size */
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
/* with drop/negative sync methods the muxer regenerates timestamps */
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* harvest encoder quality/error stats from packet side data */
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* convert from the internal mux_timebase to the stream's real time base */
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
/* sum - min - max == median of (pts, dts, last_mux_dts + 1) */
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonous DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
773 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
/* Mark this stream's encoder finished and clamp the owning file's
 * recording time to the stream's current end position, so the other
 * streams of the file stop at the same point. */
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
818 * Send a single packet to the output, applying any bitstream filters
819 * associated with the output stream. This may result in any number
820 * of packets actually being written, depending on what bitstream
821 * filters are applied. The supplied packet is consumed and will be
822 * blank (as if newly-allocated) when this function returns.
824 * If eof is set, instead indicate EOF to all bitstream filters and
825 * therefore flush any delayed packets to the output. A blank packet
826 * must be supplied in this case.
828 static void output_packet(OutputFile *of, AVPacket *pkt,
829 OutputStream *ost, int eof)
833 /* apply the output bitstream filters, if any */
834 if (ost->nb_bitstream_filters) {
/* feed the head of the chain, then walk packets down through it */
837 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
844 /* get a packet from the previous filter up the chain */
845 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846 if (ret == AVERROR(EAGAIN)) {
850 } else if (ret == AVERROR_EOF) {
855 /* send it to the next filter down the chain or to the muxer */
856 if (idx < ost->nb_bitstream_filters) {
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
865 write_packet(of, pkt, ost, 0);
/* no bitstream filters: mux the packet directly */
868 write_packet(of, pkt, ost, 0);
871 if (ret < 0 && ret != AVERROR_EOF) {
872 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Close the stream once its current position (sync_opts, relative to
 * first_pts) has reached the output file's -t recording_time; callers use
 * the return value to decide whether to keep encoding.  (The literal
 * return statements are not visible in this extraction.) */
879 static int check_recording_time(OutputStream *ost)
881 OutputFile *of = output_files[ost->file_index];
883 if (of->recording_time != INT64_MAX &&
884 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
885 AV_TIME_BASE_Q) >= 0) {
886 close_output_stream(ost);
/* Encode one audio frame and mux every packet the encoder produces.
 * ost->sync_opts tracks the next expected sample position and is used as
 * a fallback pts when the frame has none (or when -async < 0). */
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
895 AVCodecContext *enc = ost->enc_ctx;
899 av_init_packet(&pkt);
903 if (!check_recording_time(ost))
906 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907 frame->pts = ost->sync_opts;
908 ost->sync_opts = frame->pts + frame->nb_samples;
909 ost->samples_encoded += frame->nb_samples;
910 ost->frames_encoded++;
912 av_assert0(pkt.size || !pkt.data);
913 update_benchmark(NULL);
915 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918 enc->time_base.num, enc->time_base.den);
921 ret = avcodec_send_frame(enc, frame);
/* drain every packet currently available from the encoder */
926 ret = avcodec_receive_packet(enc, &pkt);
927 if (ret == AVERROR(EAGAIN))
932 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
937 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
943 output_packet(of, &pkt, ost, 0);
948 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
 * twice (nb == 2): one packet draws the subtitle, a second clears it; the
 * pts of the second packet is shifted to the end of the display window. */
952 static void do_subtitle_out(OutputFile *of,
956 int subtitle_out_max_size = 1024 * 1024;
957 int subtitle_out_size, nb, i;
962 if (sub->pts == AV_NOPTS_VALUE) {
963 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
972 subtitle_out = av_malloc(subtitle_out_max_size);
974 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
979 /* Note: DVB subtitle need one packet to draw them and one other
980 packet to clear them */
981 /* XXX: signal it in the codec context ? */
982 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
987 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
989 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990 pts -= output_files[ost->file_index]->start_time;
991 for (i = 0; i < nb; i++) {
992 unsigned save_num_rects = sub->num_rects;
994 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995 if (!check_recording_time(ost))
999 // start_display_time is required to be 0
1000 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001 sub->end_display_time -= sub->start_display_time;
1002 sub->start_display_time = 0;
1006 ost->frames_encoded++;
1008 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009 subtitle_out_max_size, sub);
/* restore the rect count that the pass-specific encode may have changed */
1011 sub->num_rects = save_num_rects;
1012 if (subtitle_out_size < 0) {
1013 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1017 av_init_packet(&pkt);
1018 pkt.data = subtitle_out;
1019 pkt.size = subtitle_out_size;
1020 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023 /* XXX: the pts correction is handled here. Maybe handling
1024 it in the codec would be better */
1026 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1031 output_packet(of, &pkt, ost, 0);
/* Video encode path.  Applies the selected vsync policy -- computing how
 * many times to emit this frame (nb_frames) and how many of those should
 * repeat the PREVIOUS frame (nb0_frames) -- honors forced keyframes, then
 * encodes each resulting frame and muxes the packets.  next_picture ==
 * NULL means flush: the dup counts are predicted from recent history. */
1035 static void do_video_out(OutputFile *of,
1037 AVFrame *next_picture,
1040 int ret, format_video_sync;
1042 AVCodecContext *enc = ost->enc_ctx;
1043 AVCodecParameters *mux_par = ost->st->codecpar;
1044 AVRational frame_rate;
1045 int nb_frames, nb0_frames, i;
1046 double delta, delta0;
1047 double duration = 0;
1049 InputStream *ist = NULL;
1050 AVFilterContext *filter = ost->filter->filter;
1052 if (ost->source_index >= 0)
1053 ist = input_streams[ost->source_index];
/* estimate the frame duration in encoder time-base units */
1055 frame_rate = av_buffersink_get_frame_rate(filter);
1056 if (frame_rate.num > 0 && frame_rate.den > 0)
1057 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1059 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1062 if (!ost->filters_script &&
1066 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1070 if (!next_picture) {
/* flush: predict dup counts as the median of the last three */
1072 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073 ost->last_nb0_frames[1],
1074 ost->last_nb0_frames[2]);
1076 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077 delta = delta0 + duration;
1079 /* by default, we output a single frame */
1080 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve VSYNC_AUTO to a concrete policy based on the output format */
1083 format_video_sync = video_sync_method;
1084 if (format_video_sync == VSYNC_AUTO) {
1085 if(!strcmp(of->ctx->oformat->name, "avi")) {
1086 format_video_sync = VSYNC_VFR;
1088 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1090 && format_video_sync == VSYNC_CFR
1091 && input_files[ist->file_index]->ctx->nb_streams == 1
1092 && input_files[ist->file_index]->input_ts_offset == 0) {
1093 format_video_sync = VSYNC_VSCFR;
1095 if (format_video_sync == VSYNC_CFR && copy_ts) {
1096 format_video_sync = VSYNC_VSCFR;
1099 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1103 format_video_sync != VSYNC_PASSTHROUGH &&
1104 format_video_sync != VSYNC_DROP) {
1105 if (delta0 < -0.6) {
1106 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1108 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109 sync_ipts = ost->sync_opts;
1114 switch (format_video_sync) {
1116 if (ost->frame_number == 0 && delta0 >= 0.5) {
1117 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1120 ost->sync_opts = lrint(sync_ipts);
1123 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1126 } else if (delta < -1.1)
1128 else if (delta > 1.1) {
1129 nb_frames = lrintf(delta);
1131 nb0_frames = lrintf(delta0 - 0.6);
1137 else if (delta > 0.6)
1138 ost->sync_opts = lrint(sync_ipts);
1141 case VSYNC_PASSTHROUGH:
1142 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit, and record the dup history */
1149 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150 nb0_frames = FFMIN(nb0_frames, nb_frames);
1152 memmove(ost->last_nb0_frames + 1,
1153 ost->last_nb0_frames,
1154 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155 ost->last_nb0_frames[0] = nb0_frames;
1157 if (nb0_frames == 0 && ost->last_dropped) {
1159 av_log(NULL, AV_LOG_VERBOSE,
1160 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161 ost->frame_number, ost->st->index, ost->last_frame->pts);
1163 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164 if (nb_frames > dts_error_threshold * 30) {
1165 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1169 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171 if (nb_frames_dup > dup_warning) {
1172 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1176 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1178 /* duplicates frame if needed */
1179 for (i = 0; i < nb_frames; i++) {
1180 AVFrame *in_picture;
1181 av_init_packet(&pkt);
/* the first nb0_frames iterations re-encode the previous frame */
1185 if (i < nb0_frames && ost->last_frame) {
1186 in_picture = ost->last_frame;
1188 in_picture = next_picture;
1193 in_picture->pts = ost->sync_opts;
1196 if (!check_recording_time(ost))
1198 if (ost->frame_number >= ost->max_frames)
1202 #if FF_API_LAVF_FMT_RAWPICTURE
1203 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1204 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1205 /* raw pictures are written as AVPicture structure to
1206 avoid any copies. We support temporarily the older
1208 if (in_picture->interlaced_frame)
1209 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1211 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1212 pkt.data = (uint8_t *)in_picture;
1213 pkt.size = sizeof(AVPicture);
1214 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1215 pkt.flags |= AV_PKT_FLAG_KEY;
1217 output_packet(of, &pkt, ost, 0);
1221 int forced_keyframe = 0;
1224 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1225 ost->top_field_first >= 0)
1226 in_picture->top_field_first = !!ost->top_field_first;
1228 if (in_picture->interlaced_frame) {
1229 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 in_picture->quality = enc->global_quality;
1237 in_picture->pict_type = 0;
/* decide whether this frame must become a keyframe: explicit pts list,
 * a -force_key_frames expression, or "source" keyframe copying */
1239 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240 in_picture->pts * av_q2d(enc->time_base) : NAN;
1241 if (ost->forced_kf_index < ost->forced_kf_count &&
1242 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243 ost->forced_kf_index++;
1244 forced_keyframe = 1;
1245 } else if (ost->forced_keyframes_pexpr) {
1247 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248 res = av_expr_eval(ost->forced_keyframes_pexpr,
1249 ost->forced_keyframes_expr_const_values, NULL);
1250 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251 ost->forced_keyframes_expr_const_values[FKF_N],
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254 ost->forced_keyframes_expr_const_values[FKF_T],
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1258 forced_keyframe = 1;
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260 ost->forced_keyframes_expr_const_values[FKF_N];
1261 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262 ost->forced_keyframes_expr_const_values[FKF_T];
1263 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1266 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267 } else if ( ost->forced_keyframes
1268 && !strncmp(ost->forced_keyframes, "source", 6)
1269 && in_picture->key_frame==1) {
1270 forced_keyframe = 1;
1273 if (forced_keyframe) {
1274 in_picture->pict_type = AV_PICTURE_TYPE_I;
1275 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1278 update_benchmark(NULL);
1280 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283 enc->time_base.num, enc->time_base.den);
1286 ost->frames_encoded++;
1288 ret = avcodec_send_frame(enc, in_picture);
/* drain every packet currently available from the encoder */
1293 ret = avcodec_receive_packet(enc, &pkt);
1294 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295 if (ret == AVERROR(EAGAIN))
1301 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1307 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308 pkt.pts = ost->sync_opts;
1310 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319 frame_size = pkt.size;
1320 output_packet(of, &pkt, ost, 0);
1322 /* if two pass, output log */
1323 if (ost->logfile && enc->stats_out) {
1324 fprintf(ost->logfile, "%s", enc->stats_out);
1330 * For video, number of frames in == number of packets out.
1331 * But there may be reordering, so we can't throw away frames on encoder
1332 * flush, we need to limit them here, before they go into encoder.
1334 ost->frame_number++;
1336 if (vstats_filename && frame_size)
1337 do_video_stats(ost, frame_size);
/* remember this frame so it can be duplicated by a later VFR decision */
1340 if (!ost->last_frame)
1341 ost->last_frame = av_frame_alloc();
1342 av_frame_unref(ost->last_frame);
1343 if (next_picture && ost->last_frame)
1344 av_frame_ref(ost->last_frame, next_picture);
1346 av_frame_free(&ost->last_frame);
1350 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1354 static double psnr(double d)
1356 return -10.0 * log10(d);
/*
 * Append one record of per-frame video statistics to the -vstats log file.
 * Called once per encoded video packet when vstats_filename is set.
 * NOTE(review): the leading integer on each line below is line-number residue
 * from a broken extraction; several original lines (the `enc = ost->enc_ctx;`
 * assignment, the fopen() error check, brace/else lines) are missing here —
 * compare against the complete source before editing.
 */
1359 static void do_video_stats(OutputStream *ost, int frame_size)
1361 AVCodecContext *enc;
1363 double ti1, bitrate, avg_bitrate;
1365 /* this is executed just the first time do_video_stats is called */
1367 vstats_file = fopen(vstats_filename, "w");
/* NOTE(review): vstats_file is used unchecked in this view — confirm the
 * fopen() failure path exists in the full source. */
1375 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Use the muxer's frame count so numbering matches what was written out. */
1376 frame_number = ost->st->nb_frames;
/* vstats_version 2 prefixes each record with output file/stream indices. */
1377 if (vstats_version <= 1) {
1378 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379 ost->quality / (float)FF_QP2LAMBDA);
1381 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder was asked to compute error stats. */
1385 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 fprintf(vstats_file,"f_size= %6d ", frame_size);
1389 /* compute pts value */
1390 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* NOTE(review): upstream clamps ti1 to a small positive value before the
 * avg_bitrate division below; that guard line is not visible here. */
1394 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream as completely done: no more frames will be encoded
 * and no more packets will be muxed for it.
 * NOTE(review): leading integers are extraction residue. In upstream the
 * loop below is guarded by `if (of->shortest)` so that -shortest also
 * terminates the sibling streams; that guard line is missing from this view.
 */
1404 static void finish_output_stream(OutputStream *ost)
1406 OutputFile *of = output_files[ost->file_index];
1409 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Propagate the finished state to every stream of the same output file. */
1412 for (i = 0; i < of->ctx->nb_streams; i++)
1413 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1418 * Get and encode new output from any of the filtergraphs, without causing
1419 * activity on the inputs (only already-buffered sink output is consumed).
1421 * @return 0 for success, <0 for severe errors
/*
 * Drain every configured filtergraph's buffersink and feed the resulting
 * frames to the appropriate encoder (video or audio). With flush != 0, EOF
 * from a video sink triggers a final do_video_out(NULL) to flush the fps code.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; `continue`/`exit_program` lines and several braces are missing.
 */
1423 static int reap_filters(int flush)
1425 AVFrame *filtered_frame = NULL;
1428 /* Reap all buffers present in the buffer sinks */
1429 for (i = 0; i < nb_output_streams; i++) {
1430 OutputStream *ost = output_streams[i];
1431 OutputFile *of = output_files[ost->file_index];
1432 AVFilterContext *filter;
1433 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams with no filtergraph or whose graph is not yet configured. */
1436 if (!ost->filter || !ost->filter->graph->graph)
1438 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time data arrives. */
1440 if (!ost->initialized) {
1441 char error[1024] = "";
1442 ret = init_output_stream(ost, error, sizeof(error));
1444 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445 ost->file_index, ost->index, error);
1450 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451 return AVERROR(ENOMEM);
1453 filtered_frame = ost->filtered_frame;
1456 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered; never pull on the inputs. */
1457 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1458 AV_BUFFERSINK_FLAG_NO_REQUEST);
1460 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1461 av_log(NULL, AV_LOG_WARNING,
1462 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463 } else if (flush && ret == AVERROR_EOF) {
1464 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1465 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1469 if (ost->finished) {
1470 av_frame_unref(filtered_frame);
/* Rescale the frame pts to encoder time base, keeping a higher-precision
 * float copy (float_pts) for the video fps/dup-drop logic. */
1473 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475 AVRational filter_tb = av_buffersink_get_time_base(filter);
1476 AVRational tb = enc->time_base;
/* Temporarily widen tb.den by extra_bits to keep sub-tick precision. */
1477 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 tb.den <<= extra_bits;
1481 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483 float_pts /= 1 << extra_bits;
1484 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 filtered_frame->pts =
1488 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1491 //if (ost->source_index >= 0)
1492 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1494 switch (av_buffersink_get_type(filter)) {
1495 case AVMEDIA_TYPE_VIDEO:
/* Propagate the filter's SAR unless the user forced an aspect ratio. */
1496 if (!ost->frame_aspect_ratio.num)
1497 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1500 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1503 enc->time_base.num, enc->time_base.den);
1506 do_video_out(of, ost, filtered_frame, float_pts);
1508 case AVMEDIA_TYPE_AUDIO:
/* The graph must deliver frames matching the encoder layout unless the
 * encoder can handle mid-stream parameter changes. */
1509 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510 enc->channels != filtered_frame->channels) {
1511 av_log(NULL, AV_LOG_ERROR,
1512 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1515 do_audio_out(of, ost, filtered_frame);
1518 // TODO support subtitle filters
1522 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type output sizes, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode/encode/mux
 * counters for every input and output file.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; some brace/continue lines are missing from this view.
 */
1529 static void print_final_stats(int64_t total_size)
1531 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532 uint64_t subtitle_size = 0;
1533 uint64_t data_size = 0;
1534 float percent = -1.0;
/* Accumulate written bytes per media type across all output streams. */
1538 for (i = 0; i < nb_output_streams; i++) {
1539 OutputStream *ost = output_streams[i];
1540 switch (ost->enc_ctx->codec_type) {
1541 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544 default: other_size += ost->data_size; break;
1546 extra_size += ost->enc_ctx->extradata_size;
1547 data_size += ost->data_size;
/* Pass-1-only streams write a stats log, not real output. */
1548 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw stream payload. */
1553 if (data_size && total_size>0 && total_size >= data_size)
1554 percent = 100.0 * (total_size - data_size) / data_size;
1556 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557 video_size / 1024.0,
1558 audio_size / 1024.0,
1559 subtitle_size / 1024.0,
1560 other_size / 1024.0,
1561 extra_size / 1024.0);
1563 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1565 av_log(NULL, AV_LOG_INFO, "unknown");
1566 av_log(NULL, AV_LOG_INFO, "\n");
1568 /* print verbose per-stream stats */
1569 for (i = 0; i < nb_input_files; i++) {
1570 InputFile *f = input_files[i];
/* Note: this inner total_size shadows the parameter on purpose. */
1571 uint64_t total_packets = 0, total_size = 0;
1573 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574 i, f->ctx->filename);
1576 for (j = 0; j < f->nb_streams; j++) {
1577 InputStream *ist = input_streams[f->ist_index + j];
1578 enum AVMediaType type = ist->dec_ctx->codec_type;
1580 total_size += ist->data_size;
1581 total_packets += ist->nb_packets;
1583 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584 i, j, media_type_string(type));
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586 ist->nb_packets, ist->data_size);
1588 if (ist->decoding_needed) {
1589 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590 ist->frames_decoded);
1591 if (type == AVMEDIA_TYPE_AUDIO)
1592 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593 av_log(NULL, AV_LOG_VERBOSE, "; ");
1596 av_log(NULL, AV_LOG_VERBOSE, "\n");
1599 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600 total_packets, total_size);
/* Same per-stream breakdown, now for the output side. */
1603 for (i = 0; i < nb_output_files; i++) {
1604 OutputFile *of = output_files[i];
1605 uint64_t total_packets = 0, total_size = 0;
1607 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608 i, of->ctx->filename);
1610 for (j = 0; j < of->ctx->nb_streams; j++) {
1611 OutputStream *ost = output_streams[of->ost_index + j];
1612 enum AVMediaType type = ost->enc_ctx->codec_type;
1614 total_size += ost->data_size;
1615 total_packets += ost->packets_written;
1617 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618 i, j, media_type_string(type));
1619 if (ost->encoding_needed) {
1620 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621 ost->frames_encoded);
1622 if (type == AVMEDIA_TYPE_AUDIO)
1623 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624 av_log(NULL, AV_LOG_VERBOSE, "; ");
1627 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628 ost->packets_written, ost->data_size);
1630 av_log(NULL, AV_LOG_VERBOSE, "\n");
1633 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634 total_packets, total_size);
/* Warn when nothing at all was produced — usually a trimming-option mistake. */
1636 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1639 av_log(NULL, AV_LOG_WARNING, "\n");
1641 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Build and emit the periodic status line ("frame= ... fps= ... bitrate=")
 * shown during transcoding, plus the machine-readable key=value records sent
 * to -progress (buf_script). Rate-limited to one update per 500ms unless this
 * is the final report, in which case print_final_stats() is also called.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; many lines (declarations of buf/t/vid, early returns, else
 * branches, hours/mins computation) are missing from this view.
 */
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1649 AVBPrint buf_script;
1651 AVFormatContext *oc;
1653 AVCodecContext *enc;
1654 int frame_number, vid, i;
1657 int64_t pts = INT64_MIN + 1;
/* static: state persists across calls for the 500ms rate limit and QP hist. */
1658 static int64_t last_time = -1;
1659 static int qp_histogram[52];
1660 int hours, mins, secs, us;
/* Nothing to do when neither console stats nor -progress output is wanted. */
1664 if (!print_stats && !is_last_report && !progress_avio)
1667 if (!is_last_report) {
1668 if (last_time == -1) {
1669 last_time = cur_time;
/* Throttle: at most one intermediate report every 500ms of wall clock. */
1672 if ((cur_time - last_time) < 500000)
1674 last_time = cur_time;
/* Wall-clock seconds since transcoding started (for fps/speed). */
1677 t = (cur_time-timer_start) / 1000000.0;
1680 oc = output_files[0]->ctx;
1682 total_size = avio_size(oc->pb);
1683 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684 total_size = avio_tell(oc->pb);
1688 av_bprint_init(&buf_script, 0, 1);
1689 for (i = 0; i < nb_output_streams; i++) {
1691 ost = output_streams[i];
1693 if (!ost->stream_copy)
1694 q = ost->quality / (float) FF_QP2LAMBDA;
1696 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1698 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699 ost->file_index, ost->index, q);
/* First video stream drives the frame/fps counters of the status line. */
1701 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1704 frame_number = ost->frame_number;
1705 fps = t > 1 ? frame_number / t : 0;
1706 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1707 frame_number, fps < 9.95, fps, q);
1708 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1710 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711 ost->file_index, ost->index, q);
1713 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: render the QP histogram as hex digits of log2(count+1). */
1717 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1719 for (j = 0; j < 32; j++)
1720 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1723 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1725 double error, error_sum = 0;
1726 double scale, scale_sum = 0;
1728 char type[3] = { 'Y','U','V' };
1729 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1730 for (j = 0; j < 3; j++) {
/* Final report: cumulative error over all frames; otherwise per-frame. */
1731 if (is_last_report) {
1732 error = enc->error[j];
1733 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1735 error = ost->error[j];
1736 scale = enc->width * enc->height * 255.0 * 255.0;
1742 p = psnr(error / scale);
1743 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lowercases the plane letter for the script key. */
1744 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745 ost->file_index, ost->index, type[j] | 32, p);
1747 p = psnr(error_sum / scale_sum);
1748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1749 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750 ost->file_index, ost->index, p);
1754 /* compute min output value */
1755 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1756 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757 ost->st->time_base, AV_TIME_BASE_Q));
1759 nb_frames_drop += ost->last_dropped;
1762 secs = FFABS(pts) / AV_TIME_BASE;
1763 us = FFABS(pts) % AV_TIME_BASE;
/* NOTE(review): hours/mins derivation from secs is missing in this view. */
1769 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1772 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 "size=%8.0fkB time=", total_size / 1024.0);
1777 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1779 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1780 (100 * us) / AV_TIME_BASE);
1783 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1784 av_bprintf(&buf_script, "bitrate=N/A\n");
1786 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1787 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1790 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1793 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1794 hours, mins, secs, us);
1796 if (nb_frames_dup || nb_frames_drop)
1797 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1798 nb_frames_dup, nb_frames_drop);
1799 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1800 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1803 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1804 av_bprintf(&buf_script, "speed=N/A\n");
1806 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1807 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' on the last. */
1810 if (print_stats || is_last_report) {
1811 const char end = is_last_report ? '\n' : '\r';
1812 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1813 fprintf(stderr, "%s %c", buf, end);
1815 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1820 if (progress_avio) {
1821 av_bprintf(&buf_script, "progress=%s\n",
1822 is_last_report ? "end" : "continue");
/* Guard against a truncated AVBPrint: never write past the stored buffer. */
1823 avio_write(progress_avio, buf_script.str,
1824 FFMIN(buf_script.len, buf_script.size - 1));
1825 avio_flush(progress_avio);
1826 av_bprint_finalize(&buf_script, NULL);
1827 if (is_last_report) {
1828 if ((ret = avio_closep(&progress_avio)) < 0)
1829 av_log(NULL, AV_LOG_ERROR,
1830 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1835 print_final_stats(total_size);
/*
 * At end of input, drain every encoder: send a NULL (flush) frame and pump
 * avcodec_receive_packet() until EOF, muxing the remaining packets. Streams
 * that never received data are force-initialized first so their headers can
 * still be written.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; continue/exit lines, the AVPacket declaration and several
 * braces are missing from this view.
 */
1838 static void flush_encoders(void)
1842 for (i = 0; i < nb_output_streams; i++) {
1843 OutputStream *ost = output_streams[i];
1844 AVCodecContext *enc = ost->enc_ctx;
1845 OutputFile *of = output_files[ost->file_index];
1847 if (!ost->encoding_needed)
1850 // Try to enable encoding with no input frames.
1851 // Maybe we should just let encoding fail instead.
1852 if (!ost->initialized) {
1853 FilterGraph *fg = ost->filter->graph;
1854 char error[1024] = "";
1856 av_log(NULL, AV_LOG_WARNING,
1857 "Finishing stream %d:%d without any data written to it.\n",
1858 ost->file_index, ost->st->index);
1860 if (ost->filter && !fg->graph) {
/* Fall back to demuxer-level parameters for inputs that never produced
 * a frame, so the filtergraph can still be configured. */
1862 for (x = 0; x < fg->nb_inputs; x++) {
1863 InputFilter *ifilter = fg->inputs[x];
1864 if (ifilter->format < 0) {
1865 AVCodecParameters *par = ifilter->ist->st->codecpar;
1866 // We never got any input. Set a fake format, which will
1867 // come from libavformat.
1868 ifilter->format = par->format;
1869 ifilter->sample_rate = par->sample_rate;
1870 ifilter->channels = par->channels;
1871 ifilter->channel_layout = par->channel_layout;
1872 ifilter->width = par->width;
1873 ifilter->height = par->height;
1874 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1878 if (!ifilter_has_all_input_formats(fg))
1881 ret = configure_filtergraph(fg);
1883 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1887 finish_output_stream(ost);
1890 ret = init_output_stream(ost, error, sizeof(error));
1892 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893 ost->file_index, ost->index, error);
/* Raw/PCM-style audio encoders (frame_size <= 1) have nothing buffered. */
1898 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1900 #if FF_API_LAVF_FMT_RAWPICTURE
1901 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1905 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1909 const char *desc = NULL;
1913 switch (enc->codec_type) {
1914 case AVMEDIA_TYPE_AUDIO:
1917 case AVMEDIA_TYPE_VIDEO:
1924 av_init_packet(&pkt);
1928 update_benchmark(NULL);
/* EAGAIN means the encoder wants input: feed it the NULL flush frame. */
1930 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1931 ret = avcodec_send_frame(enc, NULL);
1933 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1940 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1941 if (ret < 0 && ret != AVERROR_EOF) {
1942 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append the encoder's final stats to the pass-1 log. */
1947 if (ost->logfile && enc->stats_out) {
1948 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: emit the end-of-stream marker packet and stop draining this one. */
1950 if (ret == AVERROR_EOF) {
1951 output_packet(of, &pkt, ost, 1);
1954 if (ost->finished & MUXER_FINISHED) {
1955 av_packet_unref(&pkt);
1958 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959 pkt_size = pkt.size;
1960 output_packet(of, &pkt, ost, 0);
1961 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1962 do_video_stats(ost, pkt_size);
/*
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the return statements of this predicate are missing from
 * this view (visibly it only tests the disqualifying conditions).
 */
1969 * Check whether a packet from ist should be written into ost at this time
1971 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1973 OutputFile *of = output_files[ost->file_index];
/* Global index of ist within the flat input_streams[] array. */
1974 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* The packet must come from this output stream's selected source. */
1976 if (ost->source_index != ist_index)
/* Respect the output file's -ss: drop packets before its start time. */
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet straight to the muxer for a -c copy stream: rebuild
 * pts/dts/duration in the output mux timebase, apply -ss/-t trimming, and run
 * the legacy parser/raw-picture compatibility paths.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; return/else lines, the AVPacket opkt declaration and several
 * braces are missing from this view.
 */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 OutputFile *of = output_files[ost->file_index];
1991 InputFile *f = input_files [ist->file_index];
1992 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997 av_init_packet(&opkt);
/* By default the stream must start on a keyframe. */
1999 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000 !ost->copy_initial_nonkeyframes)
/* Drop packets dated before the requested output start. */
2003 if (!ost->frame_number && !ost->copy_prior_start) {
2004 int64_t comp_start = start_time;
2005 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007 if (pkt->pts == AV_NOPTS_VALUE ?
2008 ist->pts < comp_start :
2009 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Output-file -t reached: close this stream. */
2013 if (of->recording_time != INT64_MAX &&
2014 ist->pts >= of->recording_time + start_time) {
2015 close_output_stream(ost);
/* Input-file -t reached: same treatment. */
2019 if (f->recording_time != INT64_MAX) {
2020 start_time = f->ctx->start_time;
2021 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022 start_time += f->start_time;
2023 if (ist->pts >= f->recording_time + start_time) {
2024 close_output_stream(ost);
2029 /* force the input stream PTS */
2030 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: substitute the input stream's running dts estimate. */
2038 if (pkt->dts == AV_NOPTS_VALUE)
2039 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042 opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps via av_rescale_delta() to avoid
 * drift from coarse input timebases. */
2044 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2047 duration = ist->dec_ctx->frame_size;
2048 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2049 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2050 ost->mux_timebase) - ost_tb_start_time;
2053 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 opkt.flags = pkt->flags;
2056 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063 &opkt.data, &opkt.size,
2064 pkt->data, pkt->size,
2065 pkt->flags & AV_PKT_FLAG_KEY);
2067 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser may have allocated a new buffer; give opkt ownership of it. */
2072 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086 /* store AVPicture in AVPacket, as expected by the output format */
2087 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2093 opkt.data = (uint8_t *)&pict;
2094 opkt.size = sizeof(AVPicture);
2095 opkt.flags |= AV_PKT_FLAG_KEY;
2099 output_packet(of, &opkt, ost, 0);
/*
 * When the decoder reports no channel layout, guess the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the function's return statements are missing from this view —
 * presumably it returns nonzero on success, zero when no layout could be
 * guessed; confirm against the complete source.
 */
2102 int guess_input_channel_layout(InputStream *ist)
2104 AVCodecContext *dec = ist->dec_ctx;
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
/* Respect the user's cap on how many channels we are willing to guess for. */
2109 if (dec->channels > ist->guess_layout_max)
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Account a decode attempt in the global success/failure counters and, with
 * -xerror, abort on decode errors or on frames flagged as corrupt.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the exit_program() calls and braces are missing from this view.
 */
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] = successes, [1] = failures. */
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2127 if (ret < 0 && exit_on_error)
2130 if (exit_on_error && *got_output && ist) {
2131 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the `return 0;` / `return 1;` lines of this predicate are
 * missing from this view (visibly only the failing condition remains).
 */
2138 // Filters can be configured only if the formats of all inputs are known.
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
/* An audio/video input with format < 0 has not yet seen any frame. */
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push one decoded frame into its filtergraph input. If the frame's
 * parameters differ from what the graph was configured for (or the graph is
 * not configured yet), the graph is reconfigured; frames arriving before all
 * inputs know their format are buffered in the input's FIFO instead.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; return/continue lines and several braces are missing here.
 */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
/* A change of hw frames context also forces a reinit. */
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164 ifilter->channels != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width != frame->width ||
2169 ifilter->height != frame->height;
/* Record the new parameters on the InputFilter before reconfiguring. */
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
2182 if (!ifilter_has_all_input_formats(fg)) {
/* Not all inputs known yet: queue a clone of the frame for later. */
2183 AVFrame *tmp = av_frame_clone(frame);
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2191 av_frame_free(&tmp);
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph's buffered output before tearing it down. */
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2209 ret = configure_filtergraph(fg);
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* PUSH so the frame propagates through the graph immediately. */
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218 if (ret != AVERROR_EOF)
2219 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream on one filtergraph input. If the graph was configured,
 * a NULL frame is pushed into the buffersrc; otherwise, once every input of
 * the never-configured graph has hit EOF, all its output streams are finished.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the eof-flag assignment, break and return lines are missing
 * from this view.
 */
2226 static int ifilter_send_eof(InputFilter *ifilter)
2232 if (ifilter->filter) {
2233 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2237 // the filtergraph was never configured
2238 FilterGraph *fg = ifilter->graph;
/* Scan for any input that has not reached EOF yet. */
2239 for (i = 0; i < fg->nb_inputs; i++)
2240 if (!fg->inputs[i]->eof)
2242 if (i == fg->nb_inputs) {
2243 // All the input streams have finished without the filtergraph
2244 // ever being configured.
2245 // Mark the output streams as finished.
2246 for (j = 0; j < fg->nb_outputs; j++)
2247 finish_output_stream(fg->outputs[j]->ost);
/*
 * Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style calling convention used by the callers below.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the `*got_frame = 0/1`, the `if (pkt)` guard around
 * avcodec_send_packet() and the return statements are missing from this view.
 */
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this input
 * stream. All but the last filter receive a ref (filter_frame); the last one
 * consumes the decoded frame itself.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the else branch, break and return lines are missing here.
 */
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from a single filter input is not an error for the caller. */
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the decoded frame's pts (choosing the best
 * available timestamp source and converting it to the 1/sample_rate
 * timebase), then forward the frame to the filtergraphs.
 * NOTE(review): leading integers are line-number residue from a broken
 * extraction; the trailing parameter of the signature, goto/return targets
 * and some timebase denominators are missing from this view.
 */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
/* Persistent per-stream frames, allocated on first use. */
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder that reports no sample rate produced unusable output. */
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2342 /* increment next_dts to use for the case where the input stream does not
2343 have timestamps or there are multiple frames in the packet */
2344 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2346 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Timestamp priority: frame pts, then packet pts, then the stream's dts
 * estimate — each with its matching timebase. */
2350 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351 decoded_frame_tb = ist->st->time_base;
2352 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353 decoded_frame->pts = pkt->pts;
2354 decoded_frame_tb = ist->st->time_base;
2356 decoded_frame->pts = ist->dts;
2357 decoded_frame_tb = AV_TIME_BASE_Q;
2359 if (decoded_frame->pts != AV_NOPTS_VALUE)
2360 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362 (AVRational){1, avctx->sample_rate});
2363 ist->nb_samples = decoded_frame->nb_samples;
2364 err = send_frame_to_filters(ist, decoded_frame);
2366 av_frame_unref(ist->filter_frame);
2367 av_frame_unref(decoded_frame);
2368 return err < 0 ? err : ret;
/*
 * Decode one video packet for an input stream (pkt == NULL drains the
 * decoder at EOF), fix up timestamps, and forward the decoded frame to
 * the attached filtergraphs via send_frame_to_filters().
 * Returns the filter error if filtering failed, otherwise the decoder's
 * return value.
 * NOTE(review): several lines of this function are elided in this
 * listing; comments describe only the visible code.
 */
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2374     AVFrame *decoded_frame;
2375     int i, ret = 0, err = 0;
2376     int64_t best_effort_timestamp;
2377     int64_t dts = AV_NOPTS_VALUE;
2380     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2383     if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream scratch frames reused across calls. */
2386     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387         return AVERROR(ENOMEM);
2388     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389         return AVERROR(ENOMEM);
2390     decoded_frame = ist->decoded_frame;
/* Convert the stream-global dts (AV_TIME_BASE_Q) into the stream time base. */
2391     if (ist->dts != AV_NOPTS_VALUE)
2392         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2395     avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2398     // The old code used to set dts on the drain packet, which does not work
2399     // with the new API anymore.
/* Queue the dts so draining can still produce best-effort timestamps. */
2401     void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2403         return AVERROR(ENOMEM);
2404     ist->dts_buffer = new;
2405     ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2408     update_benchmark(NULL);
2409     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2414     // The following line may be required in some cases where there is no parser
2415     // or the parser does not has_b_frames correctly
2416     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2420             av_log(ist->dec_ctx, AV_LOG_WARNING,
2421                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2422                    "If you want to help, upload a sample "
2423                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425                    ist->dec_ctx->has_b_frames,
2426                    ist->st->codecpar->video_delay);
2429     if (ret != AVERROR_EOF)
2430         check_decode_result(ist, got_output, ret);
/* Diagnostic only: the decoder context and the emitted frame disagree on geometry/format. */
2432     if (*got_output && ret >= 0) {
2433         if (ist->dec_ctx->width != decoded_frame->width ||
2434             ist->dec_ctx->height != decoded_frame->height ||
2435             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437                 decoded_frame->width,
2438                 decoded_frame->height,
2439                 decoded_frame->format,
2440                 ist->dec_ctx->width,
2441                 ist->dec_ctx->height,
2442                 ist->dec_ctx->pix_fmt);
2446     if (!*got_output || ret < 0)
/* Honor a user-forced field order (-top option). */
2449     if(ist->top_field_first>=0)
2450         decoded_frame->top_field_first = ist->top_field_first;
2452     ist->frames_decoded++;
/* Download the frame from a hwaccel surface back to system memory if needed. */
2454     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2459     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2461     best_effort_timestamp= decoded_frame->best_effort_timestamp;
/* Forced CFR input (-r on input): synthesize monotonically increasing pts. */
2463     if (ist->framerate.num)
2464         best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, fall back to the buffered packet dts queue (FIFO pop). */
2466     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2467         best_effort_timestamp = ist->dts_buffer[0];
2469         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2470             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2471         ist->nb_dts_buffer--;
2474     if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* Note the embedded assignment: decoded_frame->pts is set here as well. */
2475         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2477         if (ts != AV_NOPTS_VALUE)
2478             ist->next_pts = ist->pts = ts;
2482         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2483                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2484                ist->st->index, av_ts2str(decoded_frame->pts),
2485                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2486                best_effort_timestamp,
2487                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2488                decoded_frame->key_frame, decoded_frame->pict_type,
2489                ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the codec-level one. */
2492     if (ist->st->sample_aspect_ratio.num)
2493         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2495     err = send_frame_to_filters(ist, decoded_frame);
2498     av_frame_unref(ist->filter_frame);
2499     av_frame_unref(decoded_frame);
2500     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix the previous subtitle's
 * duration (-fix_sub_duration), feed sub2video filtergraphs, and send
 * the subtitle to every subtitle-encoding output stream.
 * NOTE(review): several lines are elided in this listing; comments
 * describe only the visible code.
 */
2503 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2506     AVSubtitle subtitle;
2508     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2509                                           &subtitle, got_output, pkt);
2511     check_decode_result(NULL, got_output, ret);
2513     if (ret < 0 || !*got_output) {
2516             sub2video_flush(ist);
/*
 * -fix_sub_duration: clamp the previous subtitle's display time to end
 * when the current one starts, then swap current/previous state so the
 * (possibly shortened) previous subtitle is what gets emitted below.
 */
2520     if (ist->fix_sub_duration) {
2522         if (ist->prev_sub.got_output) {
2523             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2524                              1000, AV_TIME_BASE);
2525             if (end < ist->prev_sub.subtitle.end_display_time) {
2526                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2527                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2528                        ist->prev_sub.subtitle.end_display_time, end,
2529                        end <= 0 ? ", dropping it" : "");
2530                 ist->prev_sub.subtitle.end_display_time = end;
2533         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2534         FFSWAP(int,        ret,         ist->prev_sub.ret);
2535         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
/*
 * sub2video: render the subtitle onto the dummy video stream if its
 * frame exists; otherwise queue it until the filtergraph is configured.
 */
2543     if (ist->sub2video.frame) {
2544         sub2video_update(ist, &subtitle);
2545     } else if (ist->nb_filters) {
2546         if (!ist->sub2video.sub_queue)
2547             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2548         if (!ist->sub2video.sub_queue)
/* Grow the FIFO by doubling when full before writing the AVSubtitle by value. */
2550         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2551             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2555         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2559     if (!subtitle.num_rects)
2562     ist->frames_decoded++;
/* Fan the decoded subtitle out to every eligible subtitle encoder. */
2564     for (i = 0; i < nb_output_streams; i++) {
2565         OutputStream *ost = output_streams[i];
2567         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2568             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2571         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2576     avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input attached to this input stream.
 * NOTE(review): the surrounding braces/return are elided in this listing.
 */
2580 static int send_filter_eof(InputStream *ist)
2583     for (i = 0; i < ist->nb_filters; i++) {
2584         ret = ifilter_send_eof(ist->filters[i]);
2591 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Core per-packet dispatcher: establishes/advances the stream's pts/dts
 * bookkeeping (in AV_TIME_BASE_Q units), runs the decode loop for
 * decoded streams (audio/video/subtitle), flushes filters on EOF, and
 * handles the stream-copy path. Returns non-zero while more input is
 * expected (i.e. !eof_reached).
 * NOTE(review): many lines are elided in this listing; comments describe
 * only the visible code.
 */
2592 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2596     int eof_reached = 0;
/* First packet: seed ist->dts; for B-frame streams start slightly negative. */
2599     if (!ist->saw_first_ts) {
2600         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2602         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2603             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2604             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2606         ist->saw_first_ts = 1;
2609     if (ist->next_dts == AV_NOPTS_VALUE)
2610         ist->next_dts = ist->dts;
2611     if (ist->next_pts == AV_NOPTS_VALUE)
2612         ist->next_pts = ist->pts;
2616     av_init_packet(&avpkt);
/* Resync our clock to the packet dts when the container provides one. */
2623     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2624         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2625         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2626             ist->next_pts = ist->pts = ist->dts;
2629     // while we have more to decode or while the decoder did output something on EOF
2630     while (ist->decoding_needed) {
2631         int64_t duration = 0;
2633         int decode_failed = 0;
2635         ist->pts = ist->next_pts;
2636         ist->dts = ist->next_dts;
2638         switch (ist->dec_ctx->codec_type) {
2639         case AVMEDIA_TYPE_AUDIO:
/* On repeat iterations (draining) the packet is not resent. */
2640             ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2643         case AVMEDIA_TYPE_VIDEO:
2644             ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* Estimate frame duration: packet duration first, then codec framerate
 * scaled by repeat_pict ticks from the parser (if any). */
2646             if (!repeating || !pkt || got_output) {
2647                 if (pkt && pkt->duration) {
2648                     duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2649                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2650                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2651                     duration = ((int64_t)AV_TIME_BASE *
2652                                     ist->dec_ctx->framerate.den * ticks) /
2653                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2656             if(ist->dts != AV_NOPTS_VALUE && duration) {
2657                 ist->next_dts += duration;
2659                 ist->next_dts = AV_NOPTS_VALUE;
2663                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2665         case AVMEDIA_TYPE_SUBTITLE:
2668             ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2669             if (!pkt && ret >= 0)
2676         if (ret == AVERROR_EOF) {
2682             if (decode_failed) {
2683                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2684                        ist->file_index, ist->st->index, av_err2str(ret));
2686                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2687                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2689             if (!decode_failed || exit_on_error)
2695             ist->got_output = 1;
2700         // During draining, we might get multiple output frames in this loop.
2701         // ffmpeg.c does not drain the filter chain on configuration changes,
2702         // which means if we send multiple frames at once to the filters, and
2703         // one of those frames changes configuration, the buffered frames will
2704         // be lost. This can upset certain FATE tests.
2705         // Decode only 1 frame per call on EOF to appease these FATE tests.
2706         // The ideal solution would be to rewrite decoding to use the new
2707         // decoding API in a better way.
2714     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2715     /* except when looping we need to flush but not to send an EOF */
2716     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2717         int ret = send_filter_eof(ist);
2719             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2724     /* handle stream copy */
2725     if (!ist->decoding_needed) {
2726         ist->dts = ist->next_dts;
2727         switch (ist->dec_ctx->codec_type) {
2728         case AVMEDIA_TYPE_AUDIO:
2729             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2730                              ist->dec_ctx->sample_rate;
2732         case AVMEDIA_TYPE_VIDEO:
2733             if (ist->framerate.num) {
2734                 // TODO: Remove work-around for c99-to-c89 issue 7
2735                 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap next_dts to the forced frame-rate grid and advance by one frame. */
2736                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2737                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2738             } else if (pkt->duration) {
2739                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2740             } else if(ist->dec_ctx->framerate.num != 0) {
2741                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2742                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2743                                   ist->dec_ctx->framerate.den * ticks) /
2744                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2748         ist->pts = ist->dts;
2749         ist->next_pts = ist->next_dts;
/* Copy the packet to every output stream that stream-copies this input. */
2751     for (i = 0; pkt && i < nb_output_streams; i++) {
2752         OutputStream *ost = output_streams[i];
2754         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2757         do_streamcopy(ist, ost, pkt);
2760     return !eof_reached;
/*
 * Generate an SDP description for all RTP output files and either print
 * it to stdout or write it to -sdp_file. Only runs once every output
 * header has been written.
 * NOTE(review): error-handling/cleanup lines are elided in this listing.
 */
2763 static void print_sdp(void)
2768     AVIOContext *sdp_pb;
2769     AVFormatContext **avc;
/* Bail out until all output files have written their headers. */
2771     for (i = 0; i < nb_output_files; i++) {
2772         if (!output_files[i]->header_written)
2776     avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts them. */
2779     for (i = 0, j = 0; i < nb_output_files; i++) {
2780         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2781             avc[j] = output_files[i]->ctx;
2789     av_sdp_create(avc, j, sdp, sizeof(sdp));
2791     if (!sdp_filename) {
2792         printf("SDP:\n%s\n", sdp);
2795         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2796             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2798             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2799             avio_closep(&sdp_pb);
2800             av_freep(&sdp_filename);
/*
 * Look up the HWAccel entry whose pixel format matches pix_fmt in the
 * NULL-terminated hwaccels[] table; the not-found return is elided in
 * this listing (presumably NULL — confirm against full source).
 */
2808 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2811     for (i = 0; hwaccels[i].name; i++)
2812         if (hwaccels[i].pix_fmt == pix_fmt)
2813             return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate
 * pixel-format list and pick the first hwaccel format that matches the
 * user's -hwaccel selection and initializes successfully; records the
 * chosen hwaccel on the InputStream.
 * NOTE(review): the fallthrough to software formats and loop closing
 * braces are elided in this listing.
 */
2817 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2819     InputStream *ist = s->opaque;
2820     const enum AVPixelFormat *p;
/* The list is terminated by -1 (AV_PIX_FMT_NONE). */
2823     for (p = pix_fmts; *p != -1; p++) {
2824         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2825         const HWAccel *hwaccel;
/* Skip plain software formats — only hwaccel formats are considered here. */
2827         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2830         hwaccel = get_hwaccel(*p);
2832             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2833             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2836         ret = hwaccel->init(s);
/* Hard failure only when this hwaccel was explicitly requested. */
2838             if (ist->hwaccel_id == hwaccel->id) {
2839                 av_log(NULL, AV_LOG_FATAL,
2840                        "%s hwaccel requested for input stream #%d:%d, "
2841                        "but cannot be initialized.\n", hwaccel->name,
2842                        ist->file_index, ist->st->index);
2843                 return AV_PIX_FMT_NONE;
2848         if (ist->hw_frames_ctx) {
2849             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2850             if (!s->hw_frames_ctx)
2851                 return AV_PIX_FMT_NONE;
2854         ist->active_hwaccel_id = hwaccel->id;
2855         ist->hwaccel_pix_fmt   = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
 * buffer allocator for hwaccel frames, otherwise fall back to the
 * default libavcodec allocator.
 */
2862 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2864     InputStream *ist = s->opaque;
2866     if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2867         return ist->hwaccel_get_buffer(s, frame, flags);
2869     return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (if decoding is needed): install
 * the get_format/get_buffer callbacks, apply decoder options, set up any
 * hw device, and call avcodec_open2(). On failure fills 'error' and
 * returns a negative AVERROR code.
 * NOTE(review): several lines are elided in this listing.
 */
2872 static int init_input_stream(int ist_index, char *error, int error_len)
2875     InputStream *ist = input_streams[ist_index];
2877     if (ist->decoding_needed) {
2878         AVCodec *codec = ist->dec;
2880             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2881                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2882             return AVERROR(EINVAL);
/* Route decoder callbacks back to this InputStream via opaque. */
2885         ist->dec_ctx->opaque                = ist;
2886         ist->dec_ctx->get_format            = get_format;
2887         ist->dec_ctx->get_buffer2           = get_buffer;
2888         ist->dec_ctx->thread_safe_callbacks = 1;
2890         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2891         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2892            (ist->decoding_needed & DECODING_FOR_OST)) {
2893             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2894             if (ist->decoding_needed & DECODING_FOR_FILTER)
2895                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2898         av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2900         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2901          * audio, and video decoders such as cuvid or mediacodec */
2902         av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2904         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2905             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2907         ret = hw_device_setup_for_decode(ist);
2909             snprintf(error, error_len, "Device setup failed for "
2910                      "decoder on input stream #%d:%d : %s",
2911                      ist->file_index, ist->st->index, av_err2str(ret));
2915         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2916             if (ret == AVERROR_EXPERIMENTAL)
2917                 abort_codec_experimental(codec, 0);
2919             snprintf(error, error_len,
2920                      "Error while opening decoder for input stream "
2922                     ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options not consumed by avcodec_open2 are reported as errors. */
2925         assert_avoptions(ist->decoder_opts);
2928     ist->next_pts = AV_NOPTS_VALUE;
2929     ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, keyed by
 * ost->source_index; the no-source return is elided in this listing
 * (presumably NULL — confirm against full source).
 */
2934 static InputStream *get_input_stream(OutputStream *ost)
2936     if (ost->source_index >= 0)
2937         return input_streams[ost->source_index];
/* qsort comparator for int64_t values (ascending); avoids overflow by
 * using FFDIFFSIGN instead of subtracting. */
2941 static int compare_int64(const void *a, const void *b)
2943     return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2946 /* open the muxer when all the streams are initialized */
/*
 * If every stream of this output file is initialized, write the muxer
 * header, print the SDP if requested, and flush packets that queued up
 * while the muxer was not yet open.
 * NOTE(review): several lines are elided in this listing.
 */
2947 static int check_init_output_file(OutputFile *of, int file_index)
/* Do nothing until the last stream of this file is initialized. */
2951     for (i = 0; i < of->ctx->nb_streams; i++) {
2952         OutputStream *ost = output_streams[of->ost_index + i];
2953         if (!ost->initialized)
2957     of->ctx->interrupt_callback = int_cb;
2959     ret = avformat_write_header(of->ctx, &of->opts);
2961         av_log(NULL, AV_LOG_ERROR,
2962                "Could not write header for output file #%d "
2963                "(incorrect codec parameters ?): %s\n",
2964                file_index, av_err2str(ret));
2967     //assert_avoptions(of->opts);
2968     of->header_written = 1;
2970     av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2972     if (sdp_filename || want_sdp)
2975     /* flush the muxing queues */
2976     for (i = 0; i < of->ctx->nb_streams; i++) {
2977         OutputStream *ost = output_streams[of->ost_index + i];
2979         /* try to improve muxing time_base (only possible if nothing has been written yet) */
2980         if (!av_fifo_size(ost->muxing_queue))
2981             ost->mux_timebase = ost->st->time_base;
2983         while (av_fifo_size(ost->muxing_queue)) {
2985             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2986             write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters for one output stream:
 * propagate codec parameters and time base through each filter in
 * order, then copy the final filter's output parameters/time base back
 * onto the stream.
 * NOTE(review): error-return lines are elided in this listing.
 */
2993 static int init_output_bsfs(OutputStream *ost)
2998     if (!ost->nb_bitstream_filters)
3001     for (i = 0; i < ost->nb_bitstream_filters; i++) {
3002         ctx = ost->bsf_ctx[i];
/* Filter i's input comes from filter i-1's output (or the stream itself). */
3004         ret = avcodec_parameters_copy(ctx->par_in,
3005                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3009         ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3011         ret = av_bsf_init(ctx);
3013             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3014                    ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines the stream's final parameters. */
3019     ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3020     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3024     ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: copy codec
 * parameters, codec tag, frame rate, time base, duration, disposition
 * and side data from the source input stream, then apply per-codec-type
 * fixups (block_align for audio, SAR/frame rates for video).
 * NOTE(review): several lines are elided in this listing.
 */
3029 static int init_output_stream_streamcopy(OutputStream *ost)
3031     OutputFile *of = output_files[ost->file_index];
3032     InputStream *ist = get_input_stream(ost);
3033     AVCodecParameters *par_dst = ost->st->codecpar;
3034     AVCodecParameters *par_src = ost->ref_par;
3037     uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy always has a source input stream and never a filtergraph. */
3039     av_assert0(ist && !ost->filter);
3041     ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3043     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3045         av_log(NULL, AV_LOG_FATAL,
3046                "Error setting up codec context options.\n");
3049     avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only if the target container can represent it. */
3052         unsigned int codec_tag_tmp;
3053         if (!of->ctx->oformat->codec_tag ||
3054             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3055             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3056             codec_tag = par_src->codec_tag;
3059     ret = avcodec_parameters_copy(par_dst, par_src);
3063     par_dst->codec_tag = codec_tag;
3065     if (!ost->frame_rate.num)
3066         ost->frame_rate = ist->framerate;
3067     ost->st->avg_frame_rate = ost->frame_rate;
3069     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3073     // copy timebase while removing common factors
3074     if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3075         ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3077     // copy estimated duration as a hint to the muxer
3078     if (ost->st->duration <= 0 && ist->st->duration > 0)
3079         ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3082     ost->st->disposition = ist->st->disposition;
/* Copy all packet side data from the input stream to the output stream. */
3084     if (ist->st->nb_side_data) {
3085         for (i = 0; i < ist->st->nb_side_data; i++) {
3086             const AVPacketSideData *sd_src = &ist->st->side_data[i];
3089             dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3091                 return AVERROR(ENOMEM);
3092             memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-specified rotation overrides any copied display matrix. */
3096     if (ost->rotate_overridden) {
3097         uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3098                                               sizeof(int32_t) * 9);
3100             av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3103     ost->parser = av_parser_init(par_dst->codec_id);
3104     ost->parser_avctx = avcodec_alloc_context3(NULL);
3105     if (!ost->parser_avctx)
3106         return AVERROR(ENOMEM);
3108     switch (par_dst->codec_type) {
3109     case AVMEDIA_TYPE_AUDIO:
3110         if (audio_volume != 256) {
3111             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Known-bogus block_align values for MP3/AC3 are cleared for remuxing. */
3114         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3115             par_dst->block_align= 0;
3116         if(par_dst->codec_id == AV_CODEC_ID_AC3)
3117             par_dst->block_align= 0;
3119     case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override, then container SAR, then codec SAR. */
3120         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3122                 av_mul_q(ost->frame_aspect_ratio,
3123                          (AVRational){ par_dst->height, par_dst->width });
3124             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3125                    "with stream copy may produce invalid files\n");
3127         else if (ist->st->sample_aspect_ratio.num)
3128             sar = ist->st->sample_aspect_ratio;
3130             sar = par_src->sample_aspect_ratio;
3131         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3132         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3133         ost->st->r_frame_rate = ist->st->r_frame_rate;
3137     ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name, unless the user already
 * provided one or bitexact output was requested (then only "Lavc <name>"
 * is written).
 */
3142 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3144     AVDictionaryEntry *e;
3146     uint8_t  *encoder_string;
3147     int       encoder_string_len;
3148     int       format_flags = 0;
3149     int       codec_flags = 0;
/* Respect a user-supplied encoder tag. */
3151     if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
3154     e = av_dict_get(of->opts, "fflags", NULL, 0);
3156         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3159         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3161     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3163         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3166         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2 covers the separating space and the terminating NUL. */
3169     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3170     encoder_string     = av_mallocz(encoder_string_len);
3171     if (!encoder_string)
3174     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3175         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3177         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3178     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3179     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3180                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse a -force_key_frames specification (comma-separated list of
 * times, or "chapters[+offset]") into a sorted array of pts values in
 * the encoder time base, stored on ost->forced_kf_pts/forced_kf_count.
 * Exits fatally on allocation failure (via parse_time_or_die style
 * handling; some lines are elided in this listing).
 */
3183 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3184                                     AVCodecContext *avctx)
3187     int n = 1, i, size, index = 0;
/* First pass: count commas to size the pts array. */
3190     for (p = kf; *p; p++)
3194     pts = av_malloc_array(size, sizeof(*pts));
3196         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3201     for (i = 0; i < n; i++) {
3202         char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
3207         if (!memcmp(p, "chapters", 8)) {
3209             AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked). */
3212             if (avf->nb_chapters > INT_MAX - size ||
3213                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3215                 av_log(NULL, AV_LOG_FATAL,
3216                        "Could not allocate forced key frames array.\n");
3219             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3220             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3222             for (j = 0; j < avf->nb_chapters; j++) {
3223                 AVChapter *c = avf->chapters[j];
3224                 av_assert1(index < size);
3225                 pts[index++] = av_rescale_q(c->start, c->time_base,
3226                                             avctx->time_base) + t;
/* Plain timestamp entry. */
3231             t = parse_time_or_die("force_key_frames", p, 1);
3232             av_assert1(index < size);
3233             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3240     av_assert0(index == size);
3241     qsort(pts, size, sizeof(*pts), compare_int64);
3242     ost->forced_kf_count = size;
3243     ost->forced_kf_pts   = pts;
/*
 * Choose the encoder time base: a positive -enc_time_base wins; a
 * negative value means "copy from the input stream"; otherwise fall
 * back to default_time_base (with a warning if no input stream data is
 * available for the copy case).
 */
3246 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3248     InputStream *ist = get_input_stream(ost);
3249     AVCodecContext *enc_ctx = ost->enc_ctx;
3250     AVFormatContext *oc;
3252     if (ost->enc_timebase.num > 0) {
3253         enc_ctx->time_base = ost->enc_timebase;
/* Negative enc_timebase requests copying the input stream's time base. */
3257     if (ost->enc_timebase.num < 0) {
3259             enc_ctx->time_base = ist->st->time_base;
3263         oc = output_files[ost->file_index]->ctx;
3264         av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3267     enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for an output stream before opening it:
 * disposition, frame rate selection, per-media-type parameters pulled
 * from the filtergraph sink (sample rate/format, width/height/pix_fmt,
 * SAR), time base, and forced-keyframe parsing.
 * NOTE(review): several lines are elided in this listing; comments
 * describe only the visible code.
 */
3270 static int init_output_stream_encode(OutputStream *ost)
3272     InputStream *ist = get_input_stream(ost);
3273     AVCodecContext *enc_ctx = ost->enc_ctx;
3274     AVCodecContext *dec_ctx = NULL;
3275     AVFormatContext *oc = output_files[ost->file_index]->ctx;
3278     set_encoder_id(output_files[ost->file_index], ost);
3280     // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3281     // hand, the legacy API makes demuxers set "rotate" metadata entries,
3282     // which have to be filtered out to prevent leaking them to output files.
3283     av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3286         ost->st->disposition = ist->st->disposition;
3288         dec_ctx = ist->dec_ctx;
3290         enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type, mark it as default. */
3292         for (j = 0; j < oc->nb_streams; j++) {
3293             AVStream *st = oc->streams[j];
3294             if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3297         if (j == oc->nb_streams)
3298             if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3299                 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3300                 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate priority: filter sink, -r input option, input r_frame_rate,
 * then a last-resort 25 fps default. */
3303     if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3304         if (!ost->frame_rate.num)
3305             ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3306         if (ist && !ost->frame_rate.num)
3307             ost->frame_rate = ist->framerate;
3308         if (ist && !ost->frame_rate.num)
3309             ost->frame_rate = ist->st->r_frame_rate;
3310         if (ist && !ost->frame_rate.num) {
3311             ost->frame_rate = (AVRational){25, 1};
3312             av_log(NULL, AV_LOG_WARNING,
3314                    "about the input framerate is available. Falling "
3315                    "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3316                    "if you want a different framerate.\n",
3317                    ost->file_index, ost->index);
3319 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest frame rate the encoder supports (unless -force_fps). */
3320         if (ost->enc->supported_framerates && !ost->force_fps) {
3321             int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3322             ost->frame_rate = ost->enc->supported_framerates[idx];
3324         // reduce frame rate for mpeg4 to be within the spec limits
3325         if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3326             av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3327                       ost->frame_rate.num, ost->frame_rate.den, 65535);
3331     switch (enc_ctx->codec_type) {
3332     case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the buffersink at the end of the filtergraph. */
3333         enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
3335             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3336                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3337         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
3338         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3339         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
3341         init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3344     case AVMEDIA_TYPE_VIDEO:
3345         init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3347         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3348             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3349         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3350            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3351             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3352                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE_Q; rescale to the
 * encoder time base now that it is known. */
3354         for (j = 0; j < ost->forced_kf_count; j++)
3355             ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3357                                                  enc_ctx->time_base);
3359         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
3360         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3361         enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3362             ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3363             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3364             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3366         enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3368             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3369                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3371         enc_ctx->framerate = ost->frame_rate;
3373         ost->st->avg_frame_rate = ost->frame_rate;
3376             enc_ctx->width   != dec_ctx->width  ||
3377             enc_ctx->height  != dec_ctx->height ||
3378             enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3379             enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either a runtime expression ("expr:"), the literal
 * "source" (handled later), or a static list of times. */
3382         if (ost->forced_keyframes) {
3383             if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3384                 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3385                                     forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3387                     av_log(NULL, AV_LOG_ERROR,
3388                            "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3391                 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3392                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3393                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3394                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3396             // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3397             // parse it only for static kf timings
3398             } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3399                 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3403     case AVMEDIA_TYPE_SUBTITLE:
3404         enc_ctx->time_base = AV_TIME_BASE_Q;
3405         if (!enc_ctx->width) {
3406             enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
3407             enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
3410     case AVMEDIA_TYPE_DATA:
3417     ost->mux_timebase = enc_ctx->time_base;
3422 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3426 if (ost->encoding_needed) {
3427 AVCodec *codec = ost->enc;
3428 AVCodecContext *dec = NULL;
3431 ret = init_output_stream_encode(ost);
3435 if ((ist = get_input_stream(ost)))
3437 if (dec && dec->subtitle_header) {
3438 /* ASS code assumes this buffer is null terminated so add extra byte. */
3439 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3440 if (!ost->enc_ctx->subtitle_header)
3441 return AVERROR(ENOMEM);
3442 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3443 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3445 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3446 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3447 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3449 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3450 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3451 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3453 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3454 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3455 av_buffersink_get_format(ost->filter->filter)) {
3456 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3457 if (!ost->enc_ctx->hw_frames_ctx)
3458 return AVERROR(ENOMEM);
3460 ret = hw_device_setup_for_encode(ost);
3462 snprintf(error, error_len, "Device setup failed for "
3463 "encoder on output stream #%d:%d : %s",
3464 ost->file_index, ost->index, av_err2str(ret));
3469 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3470 if (ret == AVERROR_EXPERIMENTAL)
3471 abort_codec_experimental(codec, 1);
3472 snprintf(error, error_len,
3473 "Error while opening encoder for output stream #%d:%d - "
3474 "maybe incorrect parameters such as bit_rate, rate, width or height",
3475 ost->file_index, ost->index);
3478 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3479 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3480 av_buffersink_set_frame_size(ost->filter->filter,
3481 ost->enc_ctx->frame_size);
3482 assert_avoptions(ost->encoder_opts);
3483 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3484 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3485 " It takes bits/s as argument, not kbits/s\n");
3487 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3489 av_log(NULL, AV_LOG_FATAL,
3490 "Error initializing the output stream codec context.\n");
3494 * FIXME: ost->st->codec should't be needed here anymore.
3496 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3500 if (ost->enc_ctx->nb_coded_side_data) {
3503 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3504 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3507 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3509 return AVERROR(ENOMEM);
3510 memcpy(dst_data, sd_src->data, sd_src->size);
3515 * Add global input side data. For now this is naive, and copies it
3516 * from the input stream's global side data. All side data should
3517 * really be funneled over AVFrame and libavfilter, then added back to
3518 * packet side data, and then potentially using the first packet for
3523 for (i = 0; i < ist->st->nb_side_data; i++) {
3524 AVPacketSideData *sd = &ist->st->side_data[i];
3525 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3527 return AVERROR(ENOMEM);
3528 memcpy(dst, sd->data, sd->size);
3529 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3530 av_display_rotation_set((uint32_t *)dst, 0);
3534 // copy timebase while removing common factors
3535 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3536 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3538 // copy estimated duration as a hint to the muxer
3539 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3540 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3542 ost->st->codec->codec= ost->enc_ctx->codec;
3543 } else if (ost->stream_copy) {
3544 ret = init_output_stream_streamcopy(ost);
3549 * FIXME: will the codec context used by the parser during streamcopy
3550 * This should go away with the new parser API.
3552 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3557 // parse user provided disposition, and update stream values
3558 if (ost->disposition) {
3559 static const AVOption opts[] = {
3560 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3561 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3562 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3563 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3564 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3565 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3566 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3567 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3568 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3569 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3570 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3571 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3572 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3573 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3576 static const AVClass class = {
3578 .item_name = av_default_item_name,
3580 .version = LIBAVUTIL_VERSION_INT,
3582 const AVClass *pclass = &class;
3584 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3589 /* initialize bitstream filters for the output stream
3590 * needs to be done here, because the codec id for streamcopy is not
3591 * known until now */
3592 ret = init_output_bsfs(ost);
3596 ost->initialized = 1;
3598 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) that a new elementary stream appeared in the
 * middle of demuxing an input file; such late streams are not mapped. */
3605 static void report_new_stream(int input_index, AVPacket *pkt)
3607 InputFile *file = input_files[input_index];
3608 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream index (or a higher one) */
3610 if (pkt->stream_index < file->nb_streams_warn)
3612 av_log(file->ctx, AV_LOG_WARNING,
3613 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3614 av_get_media_type_string(st->codecpar->codec_type),
3615 input_index, pkt->stream_index,
3616 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest index warned about so each new stream warns only once */
3617 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop: bind complex-filtergraph
 * outputs to a source stream, initialize all input streams and (non-filtered)
 * output encoders, discard unused programs, write headers for stream-less
 * outputs, and dump the stream mapping to the log.
 * Returns 0 on success, a negative AVERROR on failure. */
3620 static int transcode_init(void)
3622 int ret = 0, i, j, k;
3623 AVFormatContext *oc;
3626 char error[1024] = {0};
/* for complex filtergraph outputs with no explicit source, pick the input
 * stream of the (single-input) graph as their source_index */
3628 for (i = 0; i < nb_filtergraphs; i++) {
3629 FilterGraph *fg = filtergraphs[i];
3630 for (j = 0; j < fg->nb_outputs; j++) {
3631 OutputFilter *ofilter = fg->outputs[j];
3632 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3634 if (fg->nb_inputs != 1)
3636 for (k = nb_input_streams-1; k >= 0 ; k--)
3637 if (fg->inputs[0]->ist == input_streams[k])
3639 ofilter->ost->source_index = k;
3643 /* init framerate emulation */
3644 for (i = 0; i < nb_input_files; i++) {
3645 InputFile *ifile = input_files[i];
3646 if (ifile->rate_emu)
3647 for (j = 0; j < ifile->nb_streams; j++)
3648 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3651 /* init input streams */
3652 for (i = 0; i < nb_input_streams; i++)
3653 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoders that may already be open */
3654 for (i = 0; i < nb_output_streams; i++) {
3655 ost = output_streams[i];
3656 avcodec_close(ost->enc_ctx);
3661 /* open each encoder */
3662 for (i = 0; i < nb_output_streams; i++) {
3663 // skip streams fed from filtergraphs until we have a frame for them
3664 if (output_streams[i]->filter)
3667 ret = init_output_stream(output_streams[i], error, sizeof(error));
3672 /* discard unused programs */
3673 for (i = 0; i < nb_input_files; i++) {
3674 InputFile *ifile = input_files[i];
3675 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3676 AVProgram *p = ifile->ctx->programs[j];
3677 int discard = AVDISCARD_ALL;
/* keep the program if at least one of its streams is used */
3679 for (k = 0; k < p->nb_stream_indexes; k++)
3680 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3681 discard = AVDISCARD_DEFAULT;
3684 p->discard = discard;
3688 /* write headers for files with no streams */
3689 for (i = 0; i < nb_output_files; i++) {
3690 oc = output_files[i]->ctx;
3691 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3692 ret = check_init_output_file(output_files[i], i);
3699 /* dump the stream mapping */
3700 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3701 for (i = 0; i < nb_input_streams; i++) {
3702 ist = input_streams[i];
3704 for (j = 0; j < ist->nb_filters; j++) {
3705 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3706 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3707 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3708 ist->filters[j]->name);
3709 if (nb_filtergraphs > 1)
3710 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3711 av_log(NULL, AV_LOG_INFO, "\n");
3716 for (i = 0; i < nb_output_streams; i++) {
3717 ost = output_streams[i];
3719 if (ost->attachment_filename) {
3720 /* an attached file */
3721 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3722 ost->attachment_filename, ost->file_index, ost->index);
3726 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3727 /* output from a complex graph */
3728 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3729 if (nb_filtergraphs > 1)
3730 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3732 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3733 ost->index, ost->enc ? ost->enc->name : "?");
3737 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3738 input_streams[ost->source_index]->file_index,
3739 input_streams[ost->source_index]->st->index,
3742 if (ost->sync_ist != input_streams[ost->source_index])
3743 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3744 ost->sync_ist->file_index,
3745 ost->sync_ist->st->index);
3746 if (ost->stream_copy)
3747 av_log(NULL, AV_LOG_INFO, " (copy)");
3749 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3750 const AVCodec *out_codec = ost->enc;
3751 const char *decoder_name = "?";
3752 const char *in_codec_name = "?";
3753 const char *encoder_name = "?";
3754 const char *out_codec_name = "?";
3755 const AVCodecDescriptor *desc;
3758 decoder_name = in_codec->name;
3759 desc = avcodec_descriptor_get(in_codec->id);
3761 in_codec_name = desc->name;
/* if the decoder name equals the codec name, report it as "native" */
3762 if (!strcmp(decoder_name, in_codec_name))
3763 decoder_name = "native";
3767 encoder_name = out_codec->name;
3768 desc = avcodec_descriptor_get(out_codec->id);
3770 out_codec_name = desc->name;
3771 if (!strcmp(encoder_name, out_codec_name))
3772 encoder_name = "native";
3775 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3776 in_codec_name, decoder_name,
3777 out_codec_name, encoder_name);
3779 av_log(NULL, AV_LOG_INFO, "\n");
3783 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* visible to signal handlers / other threads */
3787 atomic_store(&transcode_init_done, 1);
3792 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3793 static int need_output(void)
3797 for (i = 0; i < nb_output_streams; i++) {
3798 OutputStream *ost = output_streams[i];
3799 OutputFile *of = output_files[ost->file_index];
3800 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* stream already finished, or its output file hit the -fs size limit */
3802 if (ost->finished ||
3803 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file */
3805 if (ost->frame_number >= ost->max_frames) {
3807 for (j = 0; j < of->ctx->nb_streams; j++)
3808 close_output_stream(output_streams[of->ost_index + j]);
3819 * Select the output stream to process.
3821 * @return selected output stream, or NULL if none available
3823 static OutputStream *choose_output(void)
3826 int64_t opts_min = INT64_MAX;
3827 OutputStream *ost_min = NULL;
3829 for (i = 0; i < nb_output_streams; i++) {
3830 OutputStream *ost = output_streams[i];
/* rescaled current DTS of the stream; streams with no DTS yet sort first
 * (INT64_MIN) so they get fed before everything else */
3831 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3832 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3834 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3835 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* not yet initialized but still expecting input: prefer it immediately */
3837 if (!ost->initialized && !ost->inputs_done)
/* keep the unfinished stream with the smallest DTS; an "unavailable"
 * stream yields NULL so the caller retries later */
3840 if (!ost->finished && opts < opts_min) {
3842 ost_min = ost->unavailable ? NULL : ost;
/* Enable or disable terminal echo on stdin via termios; used around
 * interactive command entry. Failures of tcgetattr are silently ignored. */
3848 static void set_tty_echo(int on)
3852 if (tcgetattr(0, &tty) == 0) {
3853 if (on) tty.c_lflag |= ECHO;
3854 else tty.c_lflag &= ~ECHO;
3855 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard for interactive commands ('q', '+', '-', 'c'/'C',
 * 'd'/'D', '?', ...) at most every 100ms. Returns AVERROR_EXIT when the
 * user requested termination (or a signal was received), 0 otherwise. */
3860 static int check_keyboard_interaction(int64_t cur_time)
3863 static int64_t last_time;
3864 if (received_nb_signals)
3865 return AVERROR_EXIT;
3866 /* read_key() returns 0 on EOF */
/* rate-limit key polling to once per 100ms; disabled in daemon mode */
3867 if(cur_time - last_time >= 100000 && !run_as_daemon){
3869 last_time = cur_time;
3873 return AVERROR_EXIT;
3874 if (key == '+') av_log_set_level(av_log_get_level()+10);
3875 if (key == '-') av_log_set_level(av_log_get_level()-10);
3876 if (key == 's') qp_hist ^= 1;
3879 do_hex_dump = do_pkt_dump = 0;
3880 } else if(do_pkt_dump){
3884 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' to all of them */
3886 if (key == 'c' || key == 'C'){
3887 char buf[4096], target[64], command[256], arg[256] = {0};
3890 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read one line of user input, bounded by sizeof(buf)-1 */
3893 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3898 fprintf(stderr, "\n");
3900 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3901 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3902 target, time, command, arg);
3903 for (i = 0; i < nb_filtergraphs; i++) {
3904 FilterGraph *fg = filtergraphs[i];
3907 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3908 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3909 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3910 } else if (key == 'c') {
3911 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3912 ret = AVERROR_PATCHWELCOME;
/* time >= 0: queue the command for later instead of sending it now */
3914 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3916 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3921 av_log(NULL, AV_LOG_ERROR,
3922 "Parse error, at least 3 arguments were expected, "
3923 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles debug modes, 'D' prompts for an explicit debug value */
3926 if (key == 'd' || key == 'D'){
3929 debug = input_streams[0]->st->codec->debug<<1;
3930 if(!debug) debug = 1;
3931 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3938 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3943 fprintf(stderr, "\n");
3944 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3945 fprintf(stderr,"error parsing debug value\n");
/* apply the new debug flags to every decoder and encoder context */
3947 for(i=0;i<nb_input_streams;i++) {
3948 input_streams[i]->st->codec->debug = debug;
3950 for(i=0;i<nb_output_streams;i++) {
3951 OutputStream *ost = output_streams[i];
3952 ost->enc_ctx->debug = debug;
3954 if(debug) av_log_set_level(AV_LOG_DEBUG);
3955 fprintf(stderr,"debug=%d\n", debug);
3958 fprintf(stderr, "key function\n"
3959 "? show this help\n"
3960 "+ increase verbosity\n"
3961 "- decrease verbosity\n"
3962 "c Send command to first matching filter supporting it\n"
3963 "C Send/Queue command to all matching filters\n"
3964 "D cycle through available debug modes\n"
3965 "h dump packets/hex press to cycle through the 3 states\n"
3967 "s Show QP histogram\n"
/* Demuxer thread body: reads packets from one input file and forwards them
 * to the main thread through a thread message queue. On any terminal error
 * (including EOF) the error is propagated to the receiving side. */
3974 static void *input_thread(void *arg)
3977 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3982 ret = av_read_frame(f->ctx, &pkt);
3984 if (ret == AVERROR(EAGAIN)) {
3989 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3992 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* non-blocking send failed: warn that the queue is full, then retry
 * (presumably blocking this time -- the retried send appears below) */
3993 if (flags && ret == AVERROR(EAGAIN)) {
3995 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3996 av_log(f->ctx, AV_LOG_WARNING,
3997 "Thread message queue blocking; consider raising the "
3998 "thread_queue_size option (current value: %d)\n",
3999 f->thread_queue_size);
4002 if (ret != AVERROR_EOF)
4003 av_log(f->ctx, AV_LOG_ERROR,
4004 "Unable to send packet to main thread: %s\n",
/* packet could not be queued: drop it and signal the error to the reader */
4006 av_packet_unref(&pkt);
4007 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all demuxer threads: signal EOF to the senders, drain and unref
 * any queued packets, join each thread and free its message queue. */
4015 static void free_input_threads(void)
4019 for (i = 0; i < nb_input_files; i++) {
4020 InputFile *f = input_files[i];
/* nothing to do for files without a thread queue */
4023 if (!f || !f->in_thread_queue)
4025 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so the sender can terminate */
4026 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4027 av_packet_unref(&pkt);
4029 pthread_join(f->thread, NULL);
4031 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than one
 * input). Returns 0 on success or a negative AVERROR on failure. */
4035 static int init_input_threads(void)
/* a single input needs no threading */
4039 if (nb_input_files == 1)
4042 for (i = 0; i < nb_input_files; i++) {
4043 InputFile *f = input_files[i];
/* use a non-blocking queue for non-seekable (live) inputs, except lavfi */
4045 if (f->ctx->pb ? !f->ctx->pb->seekable :
4046 strcmp(f->ctx->iformat->name, "lavfi"))
4047 f->non_blocking = 1;
4048 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4049 f->thread_queue_size, sizeof(AVPacket));
4053 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4054 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4055 av_thread_message_queue_free(&f->in_thread_queue);
4056 return AVERROR(ret);
/* Receive one packet from the file's demuxer-thread queue; non-blocking
 * for inputs flagged as such (live sources). */
4062 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4064 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4066 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re rate emulation:
 * if any of the file's streams is ahead of wall-clock time, return EAGAIN
 * instead of reading. Multi-input runs read via the demuxer thread queue. */
4070 static int get_input_packet(InputFile *f, AVPacket *pkt)
4074 for (i = 0; i < f->nb_streams; i++) {
4075 InputStream *ist = input_streams[f->ist_index + i];
/* compare the stream's DTS (in microseconds) against elapsed real time */
4076 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4077 int64_t now = av_gettime_relative() - ist->start;
4079 return AVERROR(EAGAIN);
4084 if (nb_input_files > 1)
4085 return get_input_packet_mt(f, pkt);
4087 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN). */
4090 static int got_eagain(void)
4093 for (i = 0; i < nb_output_streams; i++)
4094 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-output-stream unavailable flags so the
 * next transcoding step retries all inputs and outputs. */
4099 static void reset_eagain(void)
4102 for (i = 0; i < nb_input_files; i++)
4103 input_files[i]->eagain = 0;
4104 for (i = 0; i < nb_output_streams; i++)
4105 output_streams[i]->unavailable = 0;
4108 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4109 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4110 AVRational time_base)
4116 return tmp_time_base;
/* compare the two durations across their differing time bases */
4119 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4122 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: flush decoders,
 * estimate the duration of the just-finished iteration (including the last
 * frame) and accumulate it into ifile->duration so timestamps of the next
 * loop iteration can be offset. Returns <0 on seek failure. */
4128 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4131 AVCodecContext *avctx;
4132 int i, ret, has_audio = 0;
4133 int64_t duration = 0;
4135 ret = av_seek_frame(is, -1, is->start_time, 0);
4139 for (i = 0; i < ifile->nb_streams; i++) {
4140 ist = input_streams[ifile->ist_index + i];
4141 avctx = ist->dec_ctx;
/* flush decoder state before restarting from the beginning */
4144 if (ist->decoding_needed) {
4145 process_input_packet(ist, NULL, 1);
4146 avcodec_flush_buffers(avctx);
4149 /* duration is the length of the last frame in a stream
4150 * when audio stream is present we don't care about
4151 * last video frame length because it's not defined exactly */
4152 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4156 for (i = 0; i < ifile->nb_streams; i++) {
4157 ist = input_streams[ifile->ist_index + i];
4158 avctx = ist->dec_ctx;
/* audio: last-frame duration derived from the decoded sample count */
4161 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4162 AVRational sample_rate = {1, avctx->sample_rate};
4164 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame at the configured or average frame rate, else 1 tick */
4168 if (ist->framerate.num) {
4169 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4170 } else if (ist->st->avg_frame_rate.num) {
4171 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4172 } else duration = 1;
4174 if (!ifile->duration)
4175 ifile->time_base = ist->st->time_base;
4176 /* the total duration of the stream, max_pts - min_pts is
4177 * the duration of the stream without the last frame */
4178 duration += ist->max_pts - ist->min_pts;
4179 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* a positive loop count means a finite number of remaining iterations */
4183 if (ifile->loop > 0)
/* Read and process one packet from the given input file.
 *
 * Return codes:
4191 * - 0 -- one packet was read and processed
4192 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4193 * this function should be called again
4194 * - AVERROR_EOF -- this function should not be called again
4196 static int process_input(int file_index)
4198 InputFile *ifile = input_files[file_index];
4199 AVFormatContext *is;
4207 ret = get_input_packet(ifile, &pkt);
4209 if (ret == AVERROR(EAGAIN)) {
/* at EOF with -stream_loop: seek back to the start and read again */
4213 if (ret < 0 && ifile->loop) {
4214 if ((ret = seek_to_start(ifile, is)) < 0)
4216 ret = get_input_packet(ifile, &pkt);
4217 if (ret == AVERROR(EAGAIN)) {
4223 if (ret != AVERROR_EOF) {
4224 print_error(is->filename, ret);
/* EOF handling: flush each decoder and finish direct (non-lavfi) outputs */
4229 for (i = 0; i < ifile->nb_streams; i++) {
4230 ist = input_streams[ifile->ist_index + i];
4231 if (ist->decoding_needed) {
4232 ret = process_input_packet(ist, NULL, 0);
4237 /* mark all outputs that don't go through lavfi as finished */
4238 for (j = 0; j < nb_output_streams; j++) {
4239 OutputStream *ost = output_streams[j];
4241 if (ost->source_index == ifile->ist_index + i &&
4242 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4243 finish_output_stream(ost);
4247 ifile->eof_reached = 1;
4248 return AVERROR(EAGAIN);
4254 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4255 is->streams[pkt.stream_index]);
4257 /* the following test is needed in case new streams appear
4258 dynamically in stream : we ignore them */
4259 if (pkt.stream_index >= ifile->nb_streams) {
4260 report_new_stream(file_index, &pkt);
4261 goto discard_packet;
4264 ist = input_streams[ifile->ist_index + pkt.stream_index];
4266 ist->data_size += pkt.size;
4270 goto discard_packet;
4272 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4273 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4278 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4279 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4280 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4281 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4282 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4283 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4284 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4285 av_ts2str(input_files[ist->file_index]->ts_offset),
4286 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for formats with a limited pts_wrap_bits range */
4289 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4290 int64_t stime, stime2;
4291 // Correcting starttime based on the enabled streams
4292 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4293 // so we instead do it here as part of discontinuity handling
4294 if ( ist->next_dts == AV_NOPTS_VALUE
4295 && ifile->ts_offset == -is->start_time
4296 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4297 int64_t new_start_time = INT64_MAX;
4298 for (i=0; i<is->nb_streams; i++) {
4299 AVStream *st = is->streams[i];
4300 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4302 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4304 if (new_start_time > is->start_time) {
4305 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4306 ifile->ts_offset = -new_start_time;
4310 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4311 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4312 ist->wrap_correction_done = 1;
/* undo the wrap if the packet timestamp is beyond the wrap threshold */
4314 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4315 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4316 ist->wrap_correction_done = 0;
4318 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4319 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4320 ist->wrap_correction_done = 0;
4324 /* add the stream-global side data to the first packet */
4325 if (ist->nb_packets == 1) {
4326 for (i = 0; i < ist->st->nb_side_data; i++) {
4327 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled via autorotate elsewhere, skip it here */
4330 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* do not overwrite side data the packet already carries */
4333 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4336 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4340 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the file-level timestamp offset, then the per-stream -itsscale */
4344 if (pkt.dts != AV_NOPTS_VALUE)
4345 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4346 if (pkt.pts != AV_NOPTS_VALUE)
4347 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4349 if (pkt.pts != AV_NOPTS_VALUE)
4350 pkt.pts *= ist->ts_scale;
4351 if (pkt.dts != AV_NOPTS_VALUE)
4352 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen timestamp */
4354 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4355 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4356 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4357 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4358 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4359 int64_t delta = pkt_dts - ifile->last_ts;
4360 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4361 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4362 ifile->ts_offset -= delta;
4363 av_log(NULL, AV_LOG_DEBUG,
4364 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4365 delta, ifile->ts_offset);
4366 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4367 if (pkt.pts != AV_NOPTS_VALUE)
4368 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* shift timestamps by the accumulated loop duration (-stream_loop) */
4372 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4373 if (pkt.pts != AV_NOPTS_VALUE) {
4374 pkt.pts += duration;
4375 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4376 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4379 if (pkt.dts != AV_NOPTS_VALUE)
4380 pkt.dts += duration;
/* per-stream discontinuity check against the predicted next DTS */
4382 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4383 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4384 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4385 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4387 int64_t delta = pkt_dts - ist->next_dts;
4388 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4389 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4390 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4391 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4392 ifile->ts_offset -= delta;
4393 av_log(NULL, AV_LOG_DEBUG,
4394 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4395 delta, ifile->ts_offset);
4396 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4397 if (pkt.pts != AV_NOPTS_VALUE)
4398 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-discontinuity formats: drop timestamps that exceed the error threshold */
4401 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4402 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4403 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4404 pkt.dts = AV_NOPTS_VALUE;
4406 if (pkt.pts != AV_NOPTS_VALUE){
4407 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4408 delta = pkt_pts - ist->next_dts;
4409 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4410 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4411 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4412 pkt.pts = AV_NOPTS_VALUE;
4418 if (pkt.dts != AV_NOPTS_VALUE)
4419 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4422 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4423 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4424 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4425 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4426 av_ts2str(input_files[ist->file_index]->ts_offset),
4427 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video streams alive, then hand the packet to decode/streamcopy */
4430 sub2video_heartbeat(ist, pkt.pts);
4432 process_input_packet(ist, &pkt, 0);
4435 av_packet_unref(&pkt);
/**
4441 * Perform a step of transcoding for the specified filter graph.
 *
4443 * @param[in] graph filter graph to consider
4444 * @param[out] best_ist input stream where a frame would allow to continue
4445 * @return 0 for success, <0 for error
4447 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4450 int nb_requests, nb_requests_max = 0;
4451 InputFilter *ifilter;
/* ask the graph to produce output from already-buffered frames */
4455 ret = avfilter_graph_request_oldest(graph->graph);
4457 return reap_filters(0);
/* graph fully flushed: reap remaining frames and close its outputs */
4459 if (ret == AVERROR_EOF) {
4460 ret = reap_filters(1);
4461 for (i = 0; i < graph->nb_outputs; i++)
4462 close_output_stream(graph->outputs[i]->ost);
4465 if (ret != AVERROR(EAGAIN))
/* graph needs more input: pick the input with the most failed requests */
4468 for (i = 0; i < graph->nb_inputs; i++) {
4469 ifilter = graph->inputs[i];
4471 if (input_files[ist->file_index]->eagain ||
4472 input_files[ist->file_index]->eof_reached)
4474 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4475 if (nb_requests > nb_requests_max) {
4476 nb_requests_max = nb_requests;
/* no usable input found: mark this graph's outputs unavailable for now */
4482 for (i = 0; i < graph->nb_outputs; i++)
4483 graph->outputs[i]->ost->unavailable = 1;
/**
4489 * Run a single step of transcoding.
 *
4491 * @return 0 for success, <0 for error
4493 static int transcode_step(void)
4496 InputStream *ist = NULL;
4499 ost = choose_output();
4506 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtergraph not configured yet: configure it once all inputs know their format */
4510 if (ost->filter && !ost->filter->graph->graph) {
4511 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4512 ret = configure_filtergraph(ost->filter->graph);
4514 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* configured graph: let the graph decide which input stream to feed */
4520 if (ost->filter && ost->filter->graph->graph) {
4521 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4525 } else if (ost->filter) {
/* unconfigured graph: find an input that still has data to contribute */
4527 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4528 InputFilter *ifilter = ost->filter->graph->inputs[i];
4529 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all graph inputs exhausted without output: mark this stream done */
4535 ost->inputs_done = 1;
4539 av_assert0(ost->source_index >= 0);
4540 ist = input_streams[ost->source_index];
4543 ret = process_input(ist->file_index);
4544 if (ret == AVERROR(EAGAIN)) {
4545 if (input_files[ist->file_index]->eagain)
4546 ost->unavailable = 1;
4551 return ret == AVERROR_EOF ? 0 : ret;
4553 return reap_filters(0);
/*
4557 * The following code is the main loop of the file converter
 */
4559 static int transcode(void)
4562 AVFormatContext *os;
4565 int64_t timer_start;
4566 int64_t total_packets_written = 0;
4568 ret = transcode_init();
4572 if (stdin_interaction) {
4573 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4576 timer_start = av_gettime_relative();
4579 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until told to stop */
4583 while (!received_sigterm) {
4584 int64_t cur_time= av_gettime_relative();
4586 /* if 'q' pressed, exits */
4587 if (stdin_interaction)
4588 if (check_keyboard_interaction(cur_time) < 0)
4591 /* check if there's any stream where output is still needed */
4592 if (!need_output()) {
4593 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4597 ret = transcode_step();
4598 if (ret < 0 && ret != AVERROR_EOF) {
4600 av_strerror(ret, errbuf, sizeof(errbuf));
4602 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4606 /* dump report by using the output first video and audio streams */
4607 print_report(0, timer_start, cur_time);
4610 free_input_threads();
4613 /* at the end of stream, we must flush the decoder buffers */
4614 for (i = 0; i < nb_input_streams; i++) {
4615 ist = input_streams[i];
4616 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4617 process_input_packet(ist, NULL, 0);
4624 /* write the trailer if needed and close file */
4625 for (i = 0; i < nb_output_files; i++) {
4626 os = output_files[i]->ctx;
4627 if (!output_files[i]->header_written) {
4628 av_log(NULL, AV_LOG_ERROR,
4629 "Nothing was written into output file %d (%s), because "
4630 "at least one of its streams received no packets.\n",
4634 if ((ret = av_write_trailer(os)) < 0) {
4635 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4641 /* dump report by using the first video and audio streams */
4642 print_report(1, timer_start, av_gettime_relative());
4644 /* close each encoder */
4645 for (i = 0; i < nb_output_streams; i++) {
4646 ost = output_streams[i];
4647 if (ost->encoding_needed) {
4648 av_freep(&ost->enc_ctx->stats_in);
4650 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed */
4653 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4654 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4658 /* close each decoder */
4659 for (i = 0; i < nb_input_streams; i++) {
4660 ist = input_streams[i];
4661 if (ist->decoding_needed) {
4662 avcodec_close(ist->dec_ctx);
4663 if (ist->hwaccel_uninit)
4664 ist->hwaccel_uninit(ist->dec_ctx);
4668 av_buffer_unref(&hw_device_ctx);
4669 hw_device_free_all();
/* cleanup path: free threads and per-output-stream allocations */
4676 free_input_threads();
4679 if (output_streams) {
4680 for (i = 0; i < nb_output_streams; i++) {
4681 ost = output_streams[i];
4684 if (fclose(ost->logfile))
4685 av_log(NULL, AV_LOG_ERROR,
4686 "Error closing logfile, loss of information possible: %s\n",
4687 av_err2str(AVERROR(errno)));
4688 ost->logfile = NULL;
4690 av_freep(&ost->forced_kf_pts);
4691 av_freep(&ost->apad);
4692 av_freep(&ost->disposition);
4693 av_dict_free(&ost->encoder_opts);
4694 av_dict_free(&ost->sws_dict);
4695 av_dict_free(&ost->swr_opts);
4696 av_dict_free(&ost->resample_opts);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage() on POSIX, GetProcessTimes() on Windows, and falls back
 * to wall-clock time when neither is available. */
4704 static int64_t getutime(void)
4707 struct rusage rusage;
4709 getrusage(RUSAGE_SELF, &rusage);
4710 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4711 #elif HAVE_GETPROCESSTIMES
4713 FILETIME c, e, k, u;
4714 proc = GetCurrentProcess();
4715 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds */
4716 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4718 return av_gettime_relative();
4722 static int64_t getmaxrss(void)
4724 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4725 struct rusage rusage;
4726 getrusage(RUSAGE_SELF, &rusage);
4727 return (int64_t)rusage.ru_maxrss * 1024;
4728 #elif HAVE_GETPROCESSMEMORYINFO
4730 PROCESS_MEMORY_COUNTERS memcounters;
4731 proc = GetCurrentProcess();
4732 memcounters.cb = sizeof(memcounters);
4733 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4734 return memcounters.PeakPagefileUsage;
/**
 * No-op log callback installed in daemon ("-d") mode to suppress all
 * library logging.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4744 int main(int argc, char **argv)
4751 register_exit(ffmpeg_cleanup);
4753 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4755 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4756 parse_loglevel(argc, argv, options);
4758 if(argc>1 && !strcmp(argv[1], "-d")){
4760 av_log_set_callback(log_callback_null);
4765 avcodec_register_all();
4767 avdevice_register_all();
4769 avfilter_register_all();
4771 avformat_network_init();
4773 show_banner(argc, argv, options);
4775 /* parse options and open all input/output files */
4776 ret = ffmpeg_parse_options(argc, argv);
4780 if (nb_output_files <= 0 && nb_input_files == 0) {
4782 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4786 /* file converter / grab */
4787 if (nb_output_files <= 0) {
4788 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4792 // if (nb_input_files == 0) {
4793 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4797 for (i = 0; i < nb_output_files; i++) {
4798 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4802 current_time = ti = getutime();
4803 if (transcode() < 0)
4805 ti = getutime() - ti;
4807 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4809 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4810 decode_error_stat[0], decode_error_stat[1]);
4811 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4814 exit_program(received_nb_signals ? 255 : main_return_code);
4815 return main_return_code;