2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* NOTE(review): this listing embeds original line numbers and is
 * non-contiguous -- several original lines are missing from this view.
 * Comments below describe only what is visible here. */
/* Program identity consumed by the shared cmdutils help/version code. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Output file for -vstats statistics; opened lazily in do_video_stats(). */
115 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions
 * (initializer entries are missing from this listing). */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime flags and frame-duplication/drop statistics. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer used by do_subtitle_out(); allocated on first use. */
142 static uint8_t *subtitle_out;
/* Global tables of input/output streams and files, shared with
 * ffmpeg_opt.c / ffmpeg_filter.c (hence non-static). */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame for rendering
 * subtitle rectangles; returns <0 on buffer-allocation failure.
 * NOTE(review): lines are missing from this listing (non-contiguous
 * numbering), e.g. the declaration of 'ret' and closing braces. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
/* fall back to the configured sub2video size when the decoder reports none */
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero the whole canvas: RGB32 zero is fully transparent black */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas, w/h its dimensions, and the
 * rectangle 'r' (parameter line missing here) supplies the bitmap.
 * Non-bitmap or out-of-bounds rectangles are warned about and skipped.
 * NOTE(review): several lines (declarations of x/y/src/src2, braces)
 * are missing from this listing. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* expand each palette index to a 32-bit pixel */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Push the current sub2video canvas (stamped with 'pts') into every
 * filtergraph input fed by this stream. KEEP_REF preserves our copy so
 * the same frame can be re-sent by the heartbeat.
 * NOTE(review): declarations of i/ret and closing lines are missing
 * from this listing. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
225 av_assert1(frame->data[0]);
226 ist->sub2video.last_pts = frame->pts = pts;
227 for (i = 0; i < ist->nb_filters; i++) {
228 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
229 AV_BUFFERSRC_FLAG_KEEP_REF |
230 AV_BUFFERSRC_FLAG_PUSH);
/* EOF from a closing buffersrc is expected and not worth a warning */
231 if (ret != AVERROR_EOF && ret < 0)
232 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
/* Render an AVSubtitle onto a fresh blank canvas and push it to the
 * filtergraphs. A NULL 'sub' (used by heartbeat/flush) produces an
 * empty canvas that clears the previous subtitle.
 * NOTE(review): this listing is missing lines, including the NULL-sub
 * branch that presumably sets end_pts/num_rects -- confirm against the
 * full source. */
237 void sub2video_update(InputStream *ist, AVSubtitle *sub)
239 AVFrame *frame = ist->sub2video.frame;
243 int64_t pts, end_pts;
/* convert display window from ms offsets on sub->pts into stream time base */
248 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
249 AV_TIME_BASE_Q, ist->st->time_base);
250 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
251 AV_TIME_BASE_Q, ist->st->time_base);
252 num_rects = sub->num_rects;
254 pts = ist->sub2video.end_pts;
258 if (sub2video_get_blank_frame(ist) < 0) {
259 av_log(ist->dec_ctx, AV_LOG_ERROR,
260 "Impossible to get a blank canvas.\n");
263 dst = frame->data [0];
264 dst_linesize = frame->linesize[0];
265 for (i = 0; i < num_rects; i++)
266 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
267 sub2video_push_ref(ist, pts);
/* remember when this subtitle stops displaying, for the heartbeat */
268 ist->sub2video.end_pts = end_pts;
/* Called when any packet is read from the file containing 'ist':
 * re-send the current sub2video frame on sibling subtitle streams so
 * filters waiting on a subtitle input do not stall video frames.
 * NOTE(review): lines are missing from this listing (declarations of
 * i/j/pts2/nb_reqs, 'continue' statements, braces). */
271 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
273 InputFile *infile = input_files[ist->file_index];
277 /* When a frame is read from a file, examine all sub2video streams in
278 the same file and send the sub2video frame again. Otherwise, decoded
279 video frames could be accumulating in the filter graph while a filter
280 (possibly overlay) is desperately waiting for a subtitle frame. */
281 for (i = 0; i < infile->nb_streams; i++) {
282 InputStream *ist2 = input_streams[infile->ist_index + i];
283 if (!ist2->sub2video.frame)
285 /* subtitles seem to be usually muxed ahead of other streams;
286 if not, subtracting a larger time here is necessary */
287 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
288 /* do not send the heartbeat frame if the subtitle is already ahead */
289 if (pts2 <= ist2->sub2video.last_pts)
/* expired subtitle (or no canvas yet): render a clearing frame */
291 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
292 sub2video_update(ist2, NULL);
/* only push when a downstream filter has actually requested frames */
293 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
294 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
296 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: emit a final clearing
 * frame if a subtitle is still displayed, then signal EOF (NULL frame)
 * to every connected buffer source.
 * NOTE(review): declarations of i/ret and braces are missing from this
 * listing. */
300 static void sub2video_flush(InputStream *ist)
305 if (ist->sub2video.end_pts < INT64_MAX)
306 sub2video_update(ist, NULL);
307 for (i = 0; i < ist->nb_filters; i++) {
308 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
309 if (ret != AVERROR_EOF && ret < 0)
310 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
314 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put the tty attributes saved in
 * 'oldtty' back (presumably guarded by 'restore_tty' on a line missing
 * from this listing) and reset the log line. Must stay signal-safe. */
316 static void term_exit_sigsafe(void)
320 tcsetattr (0, TCSANOW, &oldtty);
326 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handler and the
 * main loop. 'volatile' is used for signal visibility; the atomic flag
 * gates the interrupt callback behaviour after init completes. */
330 static volatile int received_sigterm = 0;
331 static volatile int received_nb_signals = 0;
332 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
333 static volatile int ffmpeg_exited = 0;
334 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many times; after
 * more than 3 signals, write a message straight to stderr (only
 * async-signal-safe calls allowed here) -- the hard-exit call itself is
 * on a line missing from this listing. */
337 sigterm_handler(int sig)
340 received_sigterm = sig;
341 received_nb_signals++;
343 if(received_nb_signals > 3) {
344 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
345 strlen("Received > 3 system signals, hard exiting\n"));
/* write() result checked only to silence warn_unused_result */
346 if (ret < 0) { /* Do nothing */ };
351 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * same sigterm_handler() path used on POSIX.
 * NOTE(review): switch statement, return statements and the Sleep()
 * inside the wait loop are on lines missing from this listing. */
352 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
354 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 case CTRL_BREAK_EVENT:
360 sigterm_handler(SIGINT);
363 case CTRL_CLOSE_EVENT:
364 case CTRL_LOGOFF_EVENT:
365 case CTRL_SHUTDOWN_EVENT:
366 sigterm_handler(SIGTERM);
367 /* Basically, with these 3 events, when we return from this method the
368 process is hard terminated, so stall as long as we need to
369 to try and let the main thread(s) clean up and gracefully terminate
370 (we have at most 5 seconds, but should be done far before that). */
371 while (!ffmpeg_exited) {
377 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior of term_init() -- the function signature and
 * surrounding lines are missing from this listing. Puts the tty into
 * raw-ish mode for interactive key handling and installs the signal
 * handlers. */
386 if (!run_as_daemon && stdin_interaction) {
388 if (tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control and echo/canonical mode */
392 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
393 |INLCR|IGNCR|ICRNL|IXON);
394 tty.c_oflag |= OPOST;
395 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
396 tty.c_cflag &= ~(CSIZE|PARENB);
401 tcsetattr (0, TCSANOW, &tty);
403 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
407 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
408 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
410 signal(SIGXCPU, sigterm_handler);
412 #if HAVE_SETCONSOLECTRLHANDLER
413 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
417 /* read a key without blocking */
/* Non-blocking single-key read: select() on stdin on POSIX, and on
 * Windows detect pipe vs console input via GetConsoleMode/PeekNamedPipe.
 * NOTE(review): most of this function's body (fd_set setup, the actual
 * read, console path, return statements) is missing from this listing. */
418 static int read_key(void)
430 n = select(1, &rfds, NULL, NULL, &tv);
439 # if HAVE_PEEKNAMEDPIPE
441 static HANDLE input_handle;
444 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is a pipe rather than a console */
445 is_pipe = !GetConsoleMode(input_handle, &dw);
449 /* When running under a GUI, you will end here. */
450 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
451 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been
 * received (during init any signal aborts; after init, transcode_init_done
 * is 1, so a second signal is required). */
469 static int decode_interrupt_cb(void *ctx)
471 return received_nb_signals > atomic_load(&transcode_init_done);
474 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global transcode state: filtergraphs (draining queued
 * frames/subtitles), output files and streams, input threads/files/
 * streams, the vstats file, and networking. The teardown order matters:
 * filtergraphs first (they reference streams), then outputs, then inputs.
 * NOTE(review): this listing is missing many lines (loop braces,
 * declarations of i/j/frame/sub/pkt/s, exit paths) -- comments describe
 * only what is visible. */
476 static void ffmpeg_cleanup(int ret)
481 int maxrss = getmaxrss() / 1024;
482 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
485 for (i = 0; i < nb_filtergraphs; i++) {
486 FilterGraph *fg = filtergraphs[i];
487 avfilter_graph_free(&fg->graph);
488 for (j = 0; j < fg->nb_inputs; j++) {
/* drain and free any frames still queued for this filter input */
489 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
491 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
492 sizeof(frame), NULL);
493 av_frame_free(&frame);
495 av_fifo_freep(&fg->inputs[j]->frame_queue);
/* same for buffered sub2video subtitles */
496 if (fg->inputs[j]->ist->sub2video.sub_queue) {
497 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
499 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
500 &sub, sizeof(sub), NULL);
501 avsubtitle_free(&sub);
503 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
505 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
506 av_freep(&fg->inputs[j]->name);
507 av_freep(&fg->inputs[j]);
509 av_freep(&fg->inputs);
510 for (j = 0; j < fg->nb_outputs; j++) {
511 av_freep(&fg->outputs[j]->name);
512 av_freep(&fg->outputs[j]->formats);
513 av_freep(&fg->outputs[j]->channel_layouts);
514 av_freep(&fg->outputs[j]->sample_rates);
515 av_freep(&fg->outputs[j]);
517 av_freep(&fg->outputs);
518 av_freep(&fg->graph_desc);
520 av_freep(&filtergraphs[i]);
522 av_freep(&filtergraphs);
524 av_freep(&subtitle_out);
/* close output files (AVFMT_NOFILE formats manage their own I/O) */
527 for (i = 0; i < nb_output_files; i++) {
528 OutputFile *of = output_files[i];
533 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
535 avformat_free_context(s);
536 av_dict_free(&of->opts);
538 av_freep(&output_files[i]);
540 for (i = 0; i < nb_output_streams; i++) {
541 OutputStream *ost = output_streams[i];
546 for (j = 0; j < ost->nb_bitstream_filters; j++)
547 av_bsf_free(&ost->bsf_ctx[j]);
548 av_freep(&ost->bsf_ctx);
550 av_frame_free(&ost->filtered_frame);
551 av_frame_free(&ost->last_frame);
552 av_dict_free(&ost->encoder_opts);
554 av_parser_close(ost->parser);
555 avcodec_free_context(&ost->parser_avctx);
557 av_freep(&ost->forced_keyframes);
558 av_expr_free(ost->forced_keyframes_pexpr);
559 av_freep(&ost->avfilter);
560 av_freep(&ost->logfile_prefix);
562 av_freep(&ost->audio_channels_map);
563 ost->audio_channels_mapped = 0;
565 av_dict_free(&ost->sws_dict);
567 avcodec_free_context(&ost->enc_ctx);
568 avcodec_parameters_free(&ost->ref_par);
/* unreference any packets still waiting in the muxing queue */
570 if (ost->muxing_queue) {
571 while (av_fifo_size(ost->muxing_queue)) {
573 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
574 av_packet_unref(&pkt);
576 av_fifo_freep(&ost->muxing_queue);
579 av_freep(&output_streams[i]);
582 free_input_threads();
584 for (i = 0; i < nb_input_files; i++) {
585 avformat_close_input(&input_files[i]->ctx);
586 av_freep(&input_files[i]);
588 for (i = 0; i < nb_input_streams; i++) {
589 InputStream *ist = input_streams[i];
591 av_frame_free(&ist->decoded_frame);
592 av_frame_free(&ist->filter_frame);
593 av_dict_free(&ist->decoder_opts);
594 avsubtitle_free(&ist->prev_sub.subtitle);
595 av_frame_free(&ist->sub2video.frame);
596 av_freep(&ist->filters);
597 av_freep(&ist->hwaccel_device);
598 av_freep(&ist->dts_buffer);
600 avcodec_free_context(&ist->dec_ctx);
602 av_freep(&input_streams[i]);
/* fclose flushes buffered writes; a failure can mean lost statistics */
606 if (fclose(vstats_file))
607 av_log(NULL, AV_LOG_ERROR,
608 "Error closing vstats file, loss of information possible: %s\n",
609 av_err2str(AVERROR(errno)));
611 av_freep(&vstats_filename);
613 av_freep(&input_streams);
614 av_freep(&input_files);
615 av_freep(&output_streams);
616 av_freep(&output_files);
620 avformat_network_deinit();
622 if (received_sigterm) {
623 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
624 (int) received_sigterm);
625 } else if (ret && atomic_load(&transcode_init_done)) {
626 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b
 * (case-sensitive match). Used to drop options already consumed by one
 * component before passing the remainder on. */
632 void remove_avoptions(AVDictionary **a, AVDictionary *b)
634 AVDictionaryEntry *t = NULL;
636 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
637 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort if any option remains unconsumed in 'm' -- an unknown option is
 * a fatal user error. (The exit call is on a line missing from this
 * listing.) */
641 void assert_avoptions(AVDictionary *m)
643 AVDictionaryEntry *t;
644 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
645 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-error helper for experimental codecs; its body (log + exit) is
 * missing from this listing. */
650 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the CPU time elapsed since the previous call
 * under the formatted label; NULL fmt (per callers in this file) only
 * resets the timer. Lines declaring va/buf and updating current_time
 * are missing from this listing. */
655 static void update_benchmark(const char *fmt, ...)
657 if (do_benchmark_all) {
658 int64_t t = getutime();
664 vsnprintf(buf, sizeof(buf), fmt, va);
666 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags are OR-ed into
 * the stream that triggered the close, 'others' into all remaining
 * streams. Used after a fatal muxing error. */
672 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
675 for (i = 0; i < nb_output_streams; i++) {
676 OutputStream *ost2 = output_streams[i];
677 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: enforce -frames limits, buffer the
 * packet if the muxer header is not yet written, rescale timestamps to
 * the stream time base, repair non-monotonic DTS, collect stats, and
 * hand the packet to av_interleaved_write_frame(). Consumes 'pkt'.
 * NOTE(review): this listing is missing lines (braces, exit paths,
 * declarations); the statement order of the DTS fixups below is
 * significant -- do not reorder. */
681 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
683 AVFormatContext *s = of->ctx;
684 AVStream *st = ost->st;
688 * Audio encoders may split the packets -- #frames in != #packets out.
689 * But there is no reordering, so we can limit the number of output packets
690 * by simply dropping them here.
691 * Counting encoded video frames needs to be done separately because of
692 * reordering, see do_video_out().
693 * Do not count the packet when unqueued because it has been counted when queued.
695 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
696 if (ost->frame_number >= ost->max_frames) {
697 av_packet_unref(pkt);
703 if (!of->header_written) {
704 AVPacket tmp_pkt = {0};
705 /* the muxer is not initialized yet, buffer the packet */
706 if (!av_fifo_space(ost->muxing_queue)) {
/* grow the queue geometrically, capped at -max_muxing_queue_size */
707 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
708 ost->max_muxing_queue_size);
709 if (new_size <= av_fifo_size(ost->muxing_queue)) {
710 av_log(NULL, AV_LOG_ERROR,
711 "Too many packets buffered for output stream %d:%d.\n",
712 ost->file_index, ost->st->index);
715 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
719 ret = av_packet_ref(&tmp_pkt, pkt);
722 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
723 av_packet_unref(pkt);
/* -vsync drop / negative -async: strip timestamps entirely */
727 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
728 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
729 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
731 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
/* pull encoder quality/error stats out of packet side data */
733 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
735 ost->quality = sd ? AV_RL32(sd) : -1;
736 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
738 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
740 ost->error[i] = AV_RL64(sd + 8 + 8*i);
745 if (ost->frame_rate.num && ost->is_cfr) {
746 if (pkt->duration > 0)
747 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
748 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
753 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
755 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
756 if (pkt->dts != AV_NOPTS_VALUE &&
757 pkt->pts != AV_NOPTS_VALUE &&
758 pkt->dts > pkt->pts) {
759 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
761 ost->file_index, ost->st->index);
/* take the middle value of pts, dts, last_mux_dts+1 as the new dts */
763 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
764 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
765 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* enforce monotonically increasing DTS (VP9 with stream copy exempt) */
767 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
768 pkt->dts != AV_NOPTS_VALUE &&
769 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
770 ost->last_mux_dts != AV_NOPTS_VALUE) {
771 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
772 if (pkt->dts < max) {
773 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
774 av_log(s, loglevel, "Non-monotonous DTS in output stream "
775 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
776 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
778 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
781 av_log(s, loglevel, "changing to %"PRId64". This may result "
782 "in incorrect timestamps in the output file.\n",
784 if (pkt->pts >= pkt->dts)
785 pkt->pts = FFMAX(pkt->pts, max);
790 ost->last_mux_dts = pkt->dts;
792 ost->data_size += pkt->size;
793 ost->packets_written++;
795 pkt->stream_index = ost->index;
798 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
799 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
800 av_get_media_type_string(ost->enc_ctx->codec_type),
801 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
802 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
807 ret = av_interleaved_write_frame(s, pkt);
809 print_error("av_interleaved_write_frame()", ret);
810 main_return_code = 1;
811 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
813 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and, for the visible
 * branch, shrink the file's recording time to the stream's end so the
 * whole file stops together. */
816 static void close_output_stream(OutputStream *ost)
818 OutputFile *of = output_files[ost->file_index];
820 ost->finished |= ENCODER_FINISHED;
822 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
823 of->recording_time = FFMIN(of->recording_time, end);
828 * Send a single packet to the output, applying any bitstream filters
829 * associated with the output stream. This may result in any number
830 * of packets actually being written, depending on what bitstream
831 * filters are applied. The supplied packet is consumed and will be
832 * blank (as if newly-allocated) when this function returns.
834 * If eof is set, instead indicate EOF to all bitstream filters and
835 * therefore flush any delayed packets to the output. A blank packet
836 * must be supplied in this case.
/* NOTE(review): lines are missing from this listing (declarations of
 * ret/idx, loop structure, the EOF bookkeeping) -- the visible code
 * runs the packet through the bsf chain and calls write_packet(). */
838 static void output_packet(OutputFile *of, AVPacket *pkt,
839 OutputStream *ost, int eof)
843 /* apply the output bitstream filters, if any */
844 if (ost->nb_bitstream_filters) {
847 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
854 /* get a packet from the previous filter up the chain */
855 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
856 if (ret == AVERROR(EAGAIN)) {
860 } else if (ret == AVERROR_EOF) {
865 /* send it to the next filter down the chain or to the muxer */
866 if (idx < ost->nb_bitstream_filters) {
867 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
875 write_packet(of, pkt, ost, 0);
/* no bitstream filters: write the packet directly */
878 write_packet(of, pkt, ost, 0);
881 if (ret < 0 && ret != AVERROR_EOF) {
882 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
883 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still emit frames: closes the stream
 * once its current position reaches the file's -t recording time. The
 * return statements are on lines missing from this listing. */
889 static int check_recording_time(OutputStream *ost)
891 OutputFile *of = output_files[ost->file_index];
893 if (of->recording_time != INT64_MAX &&
894 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
895 AV_TIME_BASE_Q) >= 0) {
896 close_output_stream(ost);
/* Encode one audio frame: stamp/advance sync_opts by nb_samples, send
 * the frame to the encoder and drain all resulting packets to the muxer.
 * NOTE(review): lines are missing from this listing (the 'frame'
 * parameter line, error-handling gotos, loop braces). */
902 static void do_audio_out(OutputFile *of, OutputStream *ost,
905 AVCodecContext *enc = ost->enc_ctx;
909 av_init_packet(&pkt);
913 if (!check_recording_time(ost))
/* derive pts from the running sample counter when absent or -async<0 */
916 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
917 frame->pts = ost->sync_opts;
918 ost->sync_opts = frame->pts + frame->nb_samples;
919 ost->samples_encoded += frame->nb_samples;
920 ost->frames_encoded++;
922 av_assert0(pkt.size || !pkt.data);
923 update_benchmark(NULL);
925 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
926 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
927 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
928 enc->time_base.num, enc->time_base.den);
931 ret = avcodec_send_frame(enc, frame);
/* drain every packet the encoder has ready */
936 ret = avcodec_receive_packet(enc, &pkt);
937 if (ret == AVERROR(EAGAIN))
942 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
944 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
947 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
948 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
949 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
950 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
953 output_packet(of, &pkt, ost, 0);
958 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle event. DVB subtitles are emitted as two packets
 * (draw + clear), hence the nb-iteration loop; timestamps are shifted
 * to honour -ss and checked against -t via check_recording_time().
 * NOTE(review): lines are missing from this listing (the ost/sub
 * parameter lines, declarations of enc/pkt/pts, nb assignment, exits). */
962 static void do_subtitle_out(OutputFile *of,
966 int subtitle_out_max_size = 1024 * 1024;
967 int subtitle_out_size, nb, i;
972 if (sub->pts == AV_NOPTS_VALUE) {
973 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer */
982 subtitle_out = av_malloc(subtitle_out_max_size);
984 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
989 /* Note: DVB subtitle need one packet to draw them and one other
990 packet to clear them */
991 /* XXX: signal it in the codec context ? */
992 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
997 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
999 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1000 pts -= output_files[ost->file_index]->start_time;
1001 for (i = 0; i < nb; i++) {
1002 unsigned save_num_rects = sub->num_rects;
1004 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1005 if (!check_recording_time(ost))
1009 // start_display_time is required to be 0
1010 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1011 sub->end_display_time -= sub->start_display_time;
1012 sub->start_display_time = 0;
1016 ost->frames_encoded++;
1018 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1019 subtitle_out_max_size, sub);
/* the encoder may clobber num_rects (second DVB pass); restore it */
1021 sub->num_rects = save_num_rects;
1022 if (subtitle_out_size < 0) {
1023 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1027 av_init_packet(&pkt);
1028 pkt.data = subtitle_out;
1029 pkt.size = subtitle_out_size;
1030 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1031 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1032 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1033 /* XXX: the pts correction is handled here. Maybe handling
1034 it in the codec would be better */
1036 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1038 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041 output_packet(of, &pkt, ost, 0);
/* Encode one video frame with video-sync handling: decide how many
 * times to emit/duplicate/drop the frame (nb_frames/nb0_frames) based
 * on the active vsync mode, apply forced-keyframe logic, then encode
 * and mux every resulting packet.
 * NOTE(review): this listing is missing many lines (sync_ipts
 * parameter/derivation, several case labels, break/goto statements,
 * braces); the vsync arithmetic below is order-sensitive -- do not
 * reorder. */
1045 static void do_video_out(OutputFile *of,
1047 AVFrame *next_picture,
1050 int ret, format_video_sync;
1052 AVCodecContext *enc = ost->enc_ctx;
1053 AVCodecParameters *mux_par = ost->st->codecpar;
1054 AVRational frame_rate;
1055 int nb_frames, nb0_frames, i;
1056 double delta, delta0;
1057 double duration = 0;
1059 InputStream *ist = NULL;
1060 AVFilterContext *filter = ost->filter->filter;
1062 if (ost->source_index >= 0)
1063 ist = input_streams[ost->source_index];
/* estimate the frame duration in encoder time-base units */
1065 frame_rate = av_buffersink_get_frame_rate(filter);
1066 if (frame_rate.num > 0 && frame_rate.den > 0)
1067 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1069 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1070 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1072 if (!ost->filters_script &&
1076 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1077 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush: repeat the median of recent dup counts for the last frame */
1080 if (!next_picture) {
1082 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1083 ost->last_nb0_frames[1],
1084 ost->last_nb0_frames[2]);
1086 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1087 delta = delta0 + duration;
1089 /* by default, we output a single frame */
1090 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve VSYNC_AUTO to a concrete sync mode for this muxer/input */
1093 format_video_sync = video_sync_method;
1094 if (format_video_sync == VSYNC_AUTO) {
1095 if(!strcmp(of->ctx->oformat->name, "avi")) {
1096 format_video_sync = VSYNC_VFR;
1098 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1100 && format_video_sync == VSYNC_CFR
1101 && input_files[ist->file_index]->ctx->nb_streams == 1
1102 && input_files[ist->file_index]->input_ts_offset == 0) {
1103 format_video_sync = VSYNC_VSCFR;
1105 if (format_video_sync == VSYNC_CFR && copy_ts) {
1106 format_video_sync = VSYNC_VSCFR;
1109 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1113 format_video_sync != VSYNC_PASSTHROUGH &&
1114 format_video_sync != VSYNC_DROP) {
1115 if (delta0 < -0.6) {
1116 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1118 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1119 sync_ipts = ost->sync_opts;
1124 switch (format_video_sync) {
1126 if (ost->frame_number == 0 && delta0 >= 0.5) {
1127 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1130 ost->sync_opts = lrint(sync_ipts);
1133 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1134 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1136 } else if (delta < -1.1)
1138 else if (delta > 1.1) {
1139 nb_frames = lrintf(delta);
1141 nb0_frames = lrintf(delta0 - 0.6);
1147 else if (delta > 0.6)
1148 ost->sync_opts = lrint(sync_ipts);
1151 case VSYNC_PASSTHROUGH:
1152 ost->sync_opts = lrint(sync_ipts);
/* clamp against -frames and record the dup count history */
1159 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1160 nb0_frames = FFMIN(nb0_frames, nb_frames);
1162 memmove(ost->last_nb0_frames + 1,
1163 ost->last_nb0_frames,
1164 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1165 ost->last_nb0_frames[0] = nb0_frames;
1167 if (nb0_frames == 0 && ost->last_dropped) {
1169 av_log(NULL, AV_LOG_VERBOSE,
1170 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1171 ost->frame_number, ost->st->index, ost->last_frame->pts);
1173 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1174 if (nb_frames > dts_error_threshold * 30) {
1175 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1179 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1180 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1181 if (nb_frames_dup > dup_warning) {
1182 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1186 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1188 /* duplicates frame if needed */
1189 for (i = 0; i < nb_frames; i++) {
1190 AVFrame *in_picture;
1191 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the previous frame */
1195 if (i < nb0_frames && ost->last_frame) {
1196 in_picture = ost->last_frame;
1198 in_picture = next_picture;
1203 in_picture->pts = ost->sync_opts;
1206 if (!check_recording_time(ost))
1208 if (ost->frame_number >= ost->max_frames)
1213 int forced_keyframe = 0;
/* propagate field order to the muxer parameters */
1216 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1217 ost->top_field_first >= 0)
1218 in_picture->top_field_first = !!ost->top_field_first;
1220 if (in_picture->interlaced_frame) {
1221 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1222 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1224 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1226 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1228 in_picture->quality = enc->global_quality;
1229 in_picture->pict_type = 0;
1231 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1232 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* forced keyframes: explicit pts list, expression, or "source" */
1233 if (ost->forced_kf_index < ost->forced_kf_count &&
1234 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1235 ost->forced_kf_index++;
1236 forced_keyframe = 1;
1237 } else if (ost->forced_keyframes_pexpr) {
1239 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240 res = av_expr_eval(ost->forced_keyframes_pexpr,
1241 ost->forced_keyframes_expr_const_values, NULL);
1242 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1243 ost->forced_keyframes_expr_const_values[FKF_N],
1244 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1245 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1246 ost->forced_keyframes_expr_const_values[FKF_T],
1247 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1250 forced_keyframe = 1;
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1252 ost->forced_keyframes_expr_const_values[FKF_N];
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1254 ost->forced_keyframes_expr_const_values[FKF_T];
1255 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1258 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1259 } else if ( ost->forced_keyframes
1260 && !strncmp(ost->forced_keyframes, "source", 6)
1261 && in_picture->key_frame==1) {
1262 forced_keyframe = 1;
1265 if (forced_keyframe) {
1266 in_picture->pict_type = AV_PICTURE_TYPE_I;
1267 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1270 update_benchmark(NULL);
1272 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1273 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1274 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1275 enc->time_base.num, enc->time_base.den);
1278 ost->frames_encoded++;
1280 ret = avcodec_send_frame(enc, in_picture);
/* drain every packet the encoder has ready */
1285 ret = avcodec_receive_packet(enc, &pkt);
1286 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287 if (ret == AVERROR(EAGAIN))
1293 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1295 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1296 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1299 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1300 pkt.pts = ost->sync_opts;
1302 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1305 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1311 frame_size = pkt.size;
1312 output_packet(of, &pkt, ost, 0);
1314 /* if two pass, output log */
1315 if (ost->logfile && enc->stats_out) {
1316 fprintf(ost->logfile, "%s", enc->stats_out);
1322 * For video, number of frames in == number of packets out.
1323 * But there may be reordering, so we can't throw away frames on encoder
1324 * flush, we need to limit them here, before they go into encoder.
1326 ost->frame_number++;
1328 if (vstats_filename && frame_size)
1329 do_video_stats(ost, frame_size);
/* keep a reference to the last frame for future duplication */
1332 if (!ost->last_frame)
1333 ost->last_frame = av_frame_alloc();
1334 av_frame_unref(ost->last_frame);
1335 if (next_picture && ost->last_frame)
1336 av_frame_ref(ost->last_frame, next_picture);
1338 av_frame_free(&ost->last_frame);
1342 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1346 static double psnr(double d)
1348 return -10.0 * log10(d);
1351 static void do_video_stats(OutputStream *ost, int frame_size)
1353 AVCodecContext *enc;
1355 double ti1, bitrate, avg_bitrate;
1357 /* this is executed just the first time do_video_stats is called */
1359 vstats_file = fopen(vstats_filename, "w");
1367 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1368 frame_number = ost->st->nb_frames;
1369 if (vstats_version <= 1) {
1370 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1371 ost->quality / (float)FF_QP2LAMBDA);
1373 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1374 ost->quality / (float)FF_QP2LAMBDA);
1377 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1378 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1380 fprintf(vstats_file,"f_size= %6d ", frame_size);
1381 /* compute pts value */
1382 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1386 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1387 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1388 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1389 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1390 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1394 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1396 static void finish_output_stream(OutputStream *ost)
1398 OutputFile *of = output_files[ost->file_index];
1401 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1404 for (i = 0; i < of->ctx->nb_streams; i++)
1405 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
 * Get and encode new output from any of the filtergraphs, without causing
 * activity on the inputs.
 *
 * @return 0 for success, <0 for severe errors
/*
 * Drain every frame currently buffered in the filtergraph sink of each
 * output stream and hand it to the matching encoder (do_video_out /
 * do_audio_out) without requesting new input from the sources.
 * flush != 0 additionally pushes a NULL frame into video encoders whose
 * sink reached EOF.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 */
1415 static int reap_filters(int flush)
1417 AVFrame *filtered_frame = NULL;
1420 /* Reap all buffers present in the buffer sinks */
1421 for (i = 0; i < nb_output_streams; i++) {
1422 OutputStream *ost = output_streams[i];
1423 OutputFile *of = output_files[ost->file_index];
1424 AVFilterContext *filter;
1425 AVCodecContext *enc = ost->enc_ctx;
/* skip streams with no (configured) filtergraph */
1428 if (!ost->filter || !ost->filter->graph->graph)
1430 filter = ost->filter->filter;
/* lazily open the encoder/muxer stream the first time output appears */
1432 if (!ost->initialized) {
1433 char error[1024] = "";
1434 ret = init_output_stream(ost, error, sizeof(error));
1436 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1437 ost->file_index, ost->index, error);
1442 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1443 return AVERROR(ENOMEM);
1445 filtered_frame = ost->filtered_frame;
1448 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered, do not pull new input */
1449 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1450 AV_BUFFERSINK_FLAG_NO_REQUEST);
1452 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1453 av_log(NULL, AV_LOG_WARNING,
1454 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1455 } else if (flush && ret == AVERROR_EOF) {
1456 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1457 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1461 if (ost->finished) {
1462 av_frame_unref(filtered_frame);
/* rescale the frame pts from the filter timebase to the encoder timebase,
 * keeping extra fractional precision in float_pts for the fps logic */
1465 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1466 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1467 AVRational filter_tb = av_buffersink_get_time_base(filter);
1468 AVRational tb = enc->time_base;
1469 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1471 tb.den <<= extra_bits;
1473 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1474 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1475 float_pts /= 1 << extra_bits;
1476 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1477 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1479 filtered_frame->pts =
1480 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1481 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1483 //if (ost->source_index >= 0)
1484 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1486 switch (av_buffersink_get_type(filter)) {
1487 case AVMEDIA_TYPE_VIDEO:
1488 if (!ost->frame_aspect_ratio.num)
1489 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1492 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1493 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1495 enc->time_base.num, enc->time_base.den);
1498 do_video_out(of, ost, filtered_frame, float_pts);
1500 case AVMEDIA_TYPE_AUDIO:
/* encoder cannot follow a channel-count change mid-stream */
1501 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1502 enc->channels != filtered_frame->channels) {
1503 av_log(NULL, AV_LOG_ERROR,
1504 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1507 do_audio_out(of, ost, filtered_frame);
1510 // TODO support subtitle filters
1514 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-stream demux/decode and
 * encode/mux counters for every input and output file.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 *
 * total_size: bytes actually written to the first output file, or <= 0 if
 * unknown (non-seekable output).
 */
1521 static void print_final_stats(int64_t total_size)
1523 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1524 uint64_t subtitle_size = 0;
1525 uint64_t data_size = 0;
1526 float percent = -1.0;
/* accumulate per-type payload sizes over all output streams */
1530 for (i = 0; i < nb_output_streams; i++) {
1531 OutputStream *ost = output_streams[i];
1532 switch (ost->enc_ctx->codec_type) {
1533 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1534 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1535 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1536 default: other_size += ost->data_size; break;
1538 extra_size += ost->enc_ctx->extradata_size;
1539 data_size += ost->data_size;
1540 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1541 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw payload */
1545 if (data_size && total_size>0 && total_size >= data_size)
1546 percent = 100.0 * (total_size - data_size) / data_size;
1548 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1549 video_size / 1024.0,
1550 audio_size / 1024.0,
1551 subtitle_size / 1024.0,
1552 other_size / 1024.0,
1553 extra_size / 1024.0);
1555 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1557 av_log(NULL, AV_LOG_INFO, "unknown");
1558 av_log(NULL, AV_LOG_INFO, "\n");
1560 /* print verbose per-stream stats */
1561 for (i = 0; i < nb_input_files; i++) {
1562 InputFile *f = input_files[i];
1563 uint64_t total_packets = 0, total_size = 0;
1565 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1566 i, f->ctx->filename);
1568 for (j = 0; j < f->nb_streams; j++) {
1569 InputStream *ist = input_streams[f->ist_index + j];
1570 enum AVMediaType type = ist->dec_ctx->codec_type;
1572 total_size += ist->data_size;
1573 total_packets += ist->nb_packets;
1575 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1576 i, j, media_type_string(type));
1577 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1578 ist->nb_packets, ist->data_size);
1580 if (ist->decoding_needed) {
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1582 ist->frames_decoded);
1583 if (type == AVMEDIA_TYPE_AUDIO)
1584 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1585 av_log(NULL, AV_LOG_VERBOSE, "; ");
1588 av_log(NULL, AV_LOG_VERBOSE, "\n");
1591 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1592 total_packets, total_size);
/* mirror of the loop above, for the output side */
1595 for (i = 0; i < nb_output_files; i++) {
1596 OutputFile *of = output_files[i];
1597 uint64_t total_packets = 0, total_size = 0;
1599 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1600 i, of->ctx->filename);
1602 for (j = 0; j < of->ctx->nb_streams; j++) {
1603 OutputStream *ost = output_streams[of->ost_index + j];
1604 enum AVMediaType type = ost->enc_ctx->codec_type;
1606 total_size += ost->data_size;
1607 total_packets += ost->packets_written;
1609 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1610 i, j, media_type_string(type));
1611 if (ost->encoding_needed) {
1612 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1613 ost->frames_encoded);
1614 if (type == AVMEDIA_TYPE_AUDIO)
1615 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1616 av_log(NULL, AV_LOG_VERBOSE, "; ");
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1620 ost->packets_written, ost->data_size);
1622 av_log(NULL, AV_LOG_VERBOSE, "\n");
1625 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1626 total_packets, total_size);
/* warn the user when nothing at all was produced */
1628 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1629 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1631 av_log(NULL, AV_LOG_WARNING, "\n");
1633 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * stderr/log and, when -progress is active, a machine-readable key=value
 * block to progress_avio. Rate-limited to one update per 500ms unless
 * is_last_report is set; the final call also prints the final stats.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 */
1638 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1641 AVBPrint buf_script;
1643 AVFormatContext *oc;
1645 AVCodecContext *enc;
1646 int frame_number, vid, i;
1649 int64_t pts = INT64_MIN + 1;
/* static: state persists across calls for the 500ms rate limiter */
1650 static int64_t last_time = -1;
1651 static int qp_histogram[52];
1652 int hours, mins, secs, us;
1656 if (!print_stats && !is_last_report && !progress_avio)
/* throttle intermediate reports to every 500ms of wall clock */
1659 if (!is_last_report) {
1660 if (last_time == -1) {
1661 last_time = cur_time;
1664 if ((cur_time - last_time) < 500000)
1666 last_time = cur_time;
1669 t = (cur_time-timer_start) / 1000000.0;
1672 oc = output_files[0]->ctx;
1674 total_size = avio_size(oc->pb);
1675 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1676 total_size = avio_tell(oc->pb);
/* buf_script collects the -progress key=value block */
1680 av_bprint_init(&buf_script, 0, 1);
1681 for (i = 0; i < nb_output_streams; i++) {
1683 ost = output_streams[i];
1685 if (!ost->stream_copy)
1686 q = ost->quality / (float) FF_QP2LAMBDA;
1688 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1689 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1690 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1691 ost->file_index, ost->index, q);
/* only the first video stream contributes frame/fps to the status line */
1693 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696 frame_number = ost->frame_number;
1697 fps = t > 1 ? frame_number / t : 0;
1698 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1699 frame_number, fps < 9.95, fps, q);
1700 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1701 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1702 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703 ost->file_index, ost->index, q);
1705 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: render a log2 histogram of encoder QP values */
1709 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1711 for (j = 0; j < 32; j++)
1712 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1715 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1717 double error, error_sum = 0;
1718 double scale, scale_sum = 0;
1720 char type[3] = { 'Y','U','V' };
1721 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1722 for (j = 0; j < 3; j++) {
/* last report: accumulated error over all frames; otherwise per-frame */
1723 if (is_last_report) {
1724 error = enc->error[j];
1725 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1727 error = ost->error[j];
1728 scale = enc->width * enc->height * 255.0 * 255.0;
1734 p = psnr(error / scale);
1735 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1736 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1737 ost->file_index, ost->index, type[j] | 32, p);
1739 p = psnr(error_sum / scale_sum);
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1741 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1742 ost->file_index, ost->index, p);
1746 /* compute min output value */
1747 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1748 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1749 ost->st->time_base, AV_TIME_BASE_Q));
1751 nb_frames_drop += ost->last_dropped;
/* split |pts| (in AV_TIME_BASE units) into h:m:s.us for display */
1754 secs = FFABS(pts) / AV_TIME_BASE;
1755 us = FFABS(pts) % AV_TIME_BASE;
1761 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1762 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1764 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1766 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1767 "size=%8.0fkB time=", total_size / 1024.0);
1769 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1770 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1771 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1772 (100 * us) / AV_TIME_BASE);
1775 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1776 av_bprintf(&buf_script, "bitrate=N/A\n");
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1779 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1782 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1783 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1784 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1785 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1786 hours, mins, secs, us);
1788 if (nb_frames_dup || nb_frames_drop)
1789 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1790 nb_frames_dup, nb_frames_drop);
1791 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1792 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1795 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1796 av_bprintf(&buf_script, "speed=N/A\n");
1798 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1799 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps the status line in place; '\n' only on the final report */
1802 if (print_stats || is_last_report) {
1803 const char end = is_last_report ? '\n' : '\r';
1804 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1805 fprintf(stderr, "%s %c", buf, end);
1807 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1812 if (progress_avio) {
1813 av_bprintf(&buf_script, "progress=%s\n",
1814 is_last_report ? "end" : "continue");
1815 avio_write(progress_avio, buf_script.str,
1816 FFMIN(buf_script.len, buf_script.size - 1));
1817 avio_flush(progress_avio);
1818 av_bprint_finalize(&buf_script, NULL);
1819 if (is_last_report) {
1820 if ((ret = avio_closep(&progress_avio)) < 0)
1821 av_log(NULL, AV_LOG_ERROR,
1822 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1827 print_final_stats(total_size);
/*
 * At end of input, drain every encoder: send a NULL (flush) frame and keep
 * receiving/muxing packets until the encoder returns EOF. Streams that
 * never received any data are force-initialized first so the muxer still
 * gets a valid (empty) stream.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 */
1830 static void flush_encoders(void)
1834 for (i = 0; i < nb_output_streams; i++) {
1835 OutputStream *ost = output_streams[i];
1836 AVCodecContext *enc = ost->enc_ctx;
1837 OutputFile *of = output_files[ost->file_index];
1839 if (!ost->encoding_needed)
1842 // Try to enable encoding with no input frames.
1843 // Maybe we should just let encoding fail instead.
1844 if (!ost->initialized) {
1845 FilterGraph *fg = ost->filter->graph;
1846 char error[1024] = "";
1848 av_log(NULL, AV_LOG_WARNING,
1849 "Finishing stream %d:%d without any data written to it.\n",
1850 ost->file_index, ost->st->index);
1852 if (ost->filter && !fg->graph) {
1854 for (x = 0; x < fg->nb_inputs; x++) {
1855 InputFilter *ifilter = fg->inputs[x];
1856 if (ifilter->format < 0) {
1857 AVCodecParameters *par = ifilter->ist->st->codecpar;
1858 // We never got any input. Set a fake format, which will
1859 // come from libavformat.
1860 ifilter->format = par->format;
1861 ifilter->sample_rate = par->sample_rate;
1862 ifilter->channels = par->channels;
1863 ifilter->channel_layout = par->channel_layout;
1864 ifilter->width = par->width;
1865 ifilter->height = par->height;
1866 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1870 if (!ifilter_has_all_input_formats(fg))
1873 ret = configure_filtergraph(fg);
1875 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1879 finish_output_stream(ost);
1882 ret = init_output_stream(ost, error, sizeof(error));
1884 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1885 ost->file_index, ost->index, error);
/* raw-PCM-style audio encoders (frame_size <= 1) need no draining */
1890 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1893 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1897 const char *desc = NULL;
1901 switch (enc->codec_type) {
1902 case AVMEDIA_TYPE_AUDIO:
1905 case AVMEDIA_TYPE_VIDEO:
1912 av_init_packet(&pkt);
1916 update_benchmark(NULL);
/* EAGAIN from receive means the encoder wants (NULL = flush) input */
1918 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1919 ret = avcodec_send_frame(enc, NULL);
1921 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1928 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1929 if (ret < 0 && ret != AVERROR_EOF) {
1930 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* append 2-pass log data, if any */
1935 if (ost->logfile && enc->stats_out) {
1936 fprintf(ost->logfile, "%s", enc->stats_out);
/* EOF: tell the muxer this stream is done */
1938 if (ret == AVERROR_EOF) {
1939 output_packet(of, &pkt, ost, 1);
1942 if (ost->finished & MUXER_FINISHED) {
1943 av_packet_unref(&pkt);
1946 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1947 pkt_size = pkt.size;
1948 output_packet(of, &pkt, ost, 0);
1949 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1950 do_video_stats(ost, pkt_size);
1957 * Check whether a packet from ist should be written into ost at this time
1959 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1961 OutputFile *of = output_files[ost->file_index];
1962 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1964 if (ost->source_index != ist_index)
1970 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one demuxed packet straight to the muxer (-c copy path): apply the
 * start-time offset, rescale timestamps into the muxer timebase, enforce
 * -t / recording_time limits, and run the legacy parser fixups for a few
 * codecs before handing the packet to output_packet().
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 */
1976 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1978 OutputFile *of = output_files[ost->file_index];
1979 InputFile *f = input_files [ist->file_index];
1980 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1981 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1982 AVPacket opkt = { 0 };
1984 av_init_packet(&opkt);
1986 // EOF: flush output bitstream filters.
1988 output_packet(of, &opkt, ost, 1);
/* without -copyinkf, wait for the first keyframe before copying */
1992 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1993 !ost->copy_initial_nonkeyframes)
1996 if (!ost->frame_number && !ost->copy_prior_start) {
1997 int64_t comp_start = start_time;
1998 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1999 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2000 if (pkt->pts == AV_NOPTS_VALUE ?
2001 ist->pts < comp_start :
2002 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output recording time (-t) is reached */
2006 if (of->recording_time != INT64_MAX &&
2007 ist->pts >= of->recording_time + start_time) {
2008 close_output_stream(ost);
2012 if (f->recording_time != INT64_MAX) {
2013 start_time = f->ctx->start_time;
2014 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2015 start_time += f->start_time;
2016 if (ist->pts >= f->recording_time + start_time) {
2017 close_output_stream(ost);
2022 /* force the input stream PTS */
2023 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale timestamps into the muxer timebase, offset by the start time */
2026 if (pkt->pts != AV_NOPTS_VALUE)
2027 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2029 opkt.pts = AV_NOPTS_VALUE;
2031 if (pkt->dts == AV_NOPTS_VALUE)
2032 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2034 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2035 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration */
2037 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2038 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2040 duration = ist->dec_ctx->frame_size;
2041 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2042 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2043 ost->mux_timebase) - ost_tb_start_time;
2046 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2048 opkt.flags = pkt->flags;
2049 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2050 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2051 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2052 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2053 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2055 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2056 &opkt.data, &opkt.size,
2057 pkt->data, pkt->size,
2058 pkt->flags & AV_PKT_FLAG_KEY);
2060 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data so the packet owns (and frees) it */
2065 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2070 opkt.data = pkt->data;
2071 opkt.size = pkt->size;
2073 av_copy_packet_side_data(&opkt, pkt);
2075 output_packet(of, &opkt, ost, 0);
2078 int guess_input_channel_layout(InputStream *ist)
2080 AVCodecContext *dec = ist->dec_ctx;
2082 if (!dec->channel_layout) {
2083 char layout_name[256];
2085 if (dec->channels > ist->guess_layout_max)
2087 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2088 if (!dec->channel_layout)
2090 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2091 dec->channels, dec->channel_layout);
2092 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2093 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2098 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2100 if (*got_output || ret<0)
2101 decode_error_stat[ret<0] ++;
2103 if (ret < 0 && exit_on_error)
2106 if (exit_on_error && *got_output && ist) {
2107 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2108 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2114 // Filters can be configured only if the formats of all inputs are known.
2115 static int ifilter_has_all_input_formats(FilterGraph *fg)
2118 for (i = 0; i < fg->nb_inputs; i++) {
2119 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2120 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push one decoded frame into a filtergraph input. If the frame's
 * parameters (format/size/rate/layout/hw context) differ from what the
 * graph was configured for, the graph is torn down and reconfigured;
 * frames arriving before all inputs have known formats are buffered in
 * the input's FIFO instead.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text; code left byte-identical, only comments added.
 */
2126 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2128 FilterGraph *fg = ifilter->graph;
2129 int need_reinit, ret, i;
2131 /* determine if the parameters for this input changed */
2132 need_reinit = ifilter->format != frame->format;
2133 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2134 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2137 switch (ifilter->ist->st->codecpar->codec_type) {
2138 case AVMEDIA_TYPE_AUDIO:
2139 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2140 ifilter->channels != frame->channels ||
2141 ifilter->channel_layout != frame->channel_layout;
2143 case AVMEDIA_TYPE_VIDEO:
2144 need_reinit |= ifilter->width != frame->width ||
2145 ifilter->height != frame->height;
2150 ret = ifilter_parameters_from_frame(ifilter, frame);
2155 /* (re)init the graph if possible, otherwise buffer the frame and return */
2156 if (need_reinit || !fg->graph) {
2157 for (i = 0; i < fg->nb_inputs; i++) {
2158 if (!ifilter_has_all_input_formats(fg)) {
/* not all inputs known yet: queue a clone of the frame in the FIFO */
2159 AVFrame *tmp = av_frame_clone(frame);
2161 return AVERROR(ENOMEM);
2162 av_frame_unref(frame);
2164 if (!av_fifo_space(ifilter->frame_queue)) {
2165 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2167 av_frame_free(&tmp);
2171 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before reconfiguring it */
2176 ret = reap_filters(1);
2177 if (ret < 0 && ret != AVERROR_EOF) {
2179 av_strerror(ret, errbuf, sizeof(errbuf));
2181 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2185 ret = configure_filtergraph(fg);
2187 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2192 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2194 if (ret != AVERROR_EOF)
2195 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2202 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2208 if (ifilter->filter) {
2209 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2213 // the filtergraph was never configured
2214 FilterGraph *fg = ifilter->graph;
2215 for (i = 0; i < fg->nb_inputs; i++)
2216 if (!fg->inputs[i]->eof)
2218 if (i == fg->nb_inputs) {
2219 // All the input streams have finished without the filtergraph
2220 // ever being configured.
2221 // Mark the output streams as finished.
2222 for (j = 0; j < fg->nb_outputs; j++)
2223 finish_output_stream(fg->outputs[j]->ost);
2230 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2231 // There is the following difference: if you got a frame, you must call
2232 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2233 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2234 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2241 ret = avcodec_send_packet(avctx, pkt);
2242 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2243 // decoded frames with avcodec_receive_frame() until done.
2244 if (ret < 0 && ret != AVERROR_EOF)
2248 ret = avcodec_receive_frame(avctx, frame);
2249 if (ret < 0 && ret != AVERROR(EAGAIN))
2257 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2262 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2263 for (i = 0; i < ist->nb_filters; i++) {
2264 if (i < ist->nb_filters - 1) {
2265 f = ist->filter_frame;
2266 ret = av_frame_ref(f, decoded_frame);
2271 ret = ifilter_send_frame(ist->filters[i], f);
2272 if (ret == AVERROR_EOF)
2273 ret = 0; /* ignore */
2275 av_log(NULL, AV_LOG_ERROR,
2276 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, derive a pts for the decoded frame (frame pts,
 * else packet pts, else the stream dts), rescale it sample-accurately, and
 * forward the frame to the filtergraphs.
 *
 * NOTE(review): this copy has lines elided and raw line numbers fused into
 * the text -- in particular the continuation of the parameter list is
 * missing. Code left byte-identical, only comments added.
 */
2283 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2286 AVFrame *decoded_frame;
2287 AVCodecContext *avctx = ist->dec_ctx;
2289 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2291 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2292 return AVERROR(ENOMEM);
2293 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2294 return AVERROR(ENOMEM);
2295 decoded_frame = ist->decoded_frame;
2297 update_benchmark(NULL);
2298 ret = decode(avctx, decoded_frame, got_output, pkt);
2299 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2303 if (ret >= 0 && avctx->sample_rate <= 0) {
2304 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2305 ret = AVERROR_INVALIDDATA;
2308 if (ret != AVERROR_EOF)
2309 check_decode_result(ist, got_output, ret);
2311 if (!*got_output || ret < 0)
2314 ist->samples_decoded += decoded_frame->nb_samples;
2315 ist->frames_decoded++;
2318 /* increment next_dts to use for the case where the input stream does not
2319 have timestamps or there are multiple frames in the packet */
2320 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2322 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the best available timestamp and its timebase */
2326 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2327 decoded_frame_tb = ist->st->time_base;
2328 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2329 decoded_frame->pts = pkt->pts;
2330 decoded_frame_tb = ist->st->time_base;
2332 decoded_frame->pts = ist->dts;
2333 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale to the sample-rate timebase with sample-accurate rounding */
2335 if (decoded_frame->pts != AV_NOPTS_VALUE)
2336 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2337 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2338 (AVRational){1, avctx->sample_rate});
2339 ist->nb_samples = decoded_frame->nb_samples;
2340 err = send_frame_to_filters(ist, decoded_frame);
2342 av_frame_unref(ist->filter_frame);
2343 av_frame_unref(decoded_frame);
2344 return err < 0 ? err : ret;
2347 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2350 AVFrame *decoded_frame;
2351 int i, ret = 0, err = 0;
2352 int64_t best_effort_timestamp;
2353 int64_t dts = AV_NOPTS_VALUE;
2356 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2357 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2359 if (!eof && pkt && pkt->size == 0)
2362 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2363 return AVERROR(ENOMEM);
2364 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2365 return AVERROR(ENOMEM);
2366 decoded_frame = ist->decoded_frame;
2367 if (ist->dts != AV_NOPTS_VALUE)
2368 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2371 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2374 // The old code used to set dts on the drain packet, which does not work
2375 // with the new API anymore.
2377 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2379 return AVERROR(ENOMEM);
2380 ist->dts_buffer = new;
2381 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2384 update_benchmark(NULL);
2385 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2386 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2390 // The following line may be required in some cases where there is no parser
2391 // or the parser does not has_b_frames correctly
2392 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2393 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2394 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2396 av_log(ist->dec_ctx, AV_LOG_WARNING,
2397 "video_delay is larger in decoder than demuxer %d > %d.\n"
2398 "If you want to help, upload a sample "
2399 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2400 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2401 ist->dec_ctx->has_b_frames,
2402 ist->st->codecpar->video_delay);
2405 if (ret != AVERROR_EOF)
2406 check_decode_result(ist, got_output, ret);
2408 if (*got_output && ret >= 0) {
2409 if (ist->dec_ctx->width != decoded_frame->width ||
2410 ist->dec_ctx->height != decoded_frame->height ||
2411 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2412 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2413 decoded_frame->width,
2414 decoded_frame->height,
2415 decoded_frame->format,
2416 ist->dec_ctx->width,
2417 ist->dec_ctx->height,
2418 ist->dec_ctx->pix_fmt);
2422 if (!*got_output || ret < 0)
2425 if(ist->top_field_first>=0)
2426 decoded_frame->top_field_first = ist->top_field_first;
2428 ist->frames_decoded++;
2430 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2431 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2435 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2437 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2438 *duration_pts = decoded_frame->pkt_duration;
2440 if (ist->framerate.num)
2441 best_effort_timestamp = ist->cfr_next_pts++;
2443 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2444 best_effort_timestamp = ist->dts_buffer[0];
2446 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2447 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2448 ist->nb_dts_buffer--;
2451 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2452 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2454 if (ts != AV_NOPTS_VALUE)
2455 ist->next_pts = ist->pts = ts;
2459 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2460 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2461 ist->st->index, av_ts2str(decoded_frame->pts),
2462 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2463 best_effort_timestamp,
2464 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2465 decoded_frame->key_frame, decoded_frame->pict_type,
2466 ist->st->time_base.num, ist->st->time_base.den);
2469 if (ist->st->sample_aspect_ratio.num)
2470 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2472 err = send_frame_to_filters(ist, decoded_frame);
2475 av_frame_unref(ist->filter_frame);
2476 av_frame_unref(decoded_frame);
2477 return err < 0 ? err : ret;
/* Decode one subtitle packet and dispatch the decoded AVSubtitle:
 * optionally clamp the previous subtitle's display duration
 * (-fix_sub_duration), feed the sub2video machinery (directly or via a
 * queue while the filtergraph is not yet configured), then encode the
 * subtitle on every matching subtitle output stream. */
2480 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2483 AVSubtitle subtitle;
2485 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2486 &subtitle, got_output, pkt);
/* NULL ist: count/report the decode result globally rather than per-stream. */
2488 check_decode_result(NULL, got_output, ret);
/* Nothing decoded: flush the sub2video filter input (presumably on EOF —
 * the guarding condition between these two lines is not visible here). */
2490 if (ret < 0 || !*got_output) {
2493 sub2video_flush(ist);
/* -fix_sub_duration: shorten the PREVIOUS subtitle so it ends no later
 * than the one just decoded; durations are compared in milliseconds. */
2497 if (ist->fix_sub_duration) {
2499 if (ist->prev_sub.got_output) {
2500 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2501 1000, AV_TIME_BASE);
2502 if (end < ist->prev_sub.subtitle.end_display_time) {
2503 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2504 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2505 ist->prev_sub.subtitle.end_display_time, end,
2506 end <= 0 ? ", dropping it" : "");
2507 ist->prev_sub.subtitle.end_display_time = end;
/* Output is delayed by one subtitle: emit the stored previous subtitle
 * and stash the current one for the next call. */
2510 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2511 FFSWAP(int, ret, ist->prev_sub.ret);
2512 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render immediately when the target frame exists; otherwise,
 * if filters are attached, queue the subtitle in a FIFO that starts at 8
 * entries and doubles when full. */
2520 if (ist->sub2video.frame) {
2521 sub2video_update(ist, &subtitle);
2522 } else if (ist->nb_filters) {
2523 if (!ist->sub2video.sub_queue)
2524 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2525 if (!ist->sub2video.sub_queue)
2527 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2528 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2532 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
/* Empty subtitle: nothing to encode. */
2536 if (!subtitle.num_rects)
2539 ist->frames_decoded++;
/* Hand the subtitle to every output stream that encodes subtitles from
 * this input stream. */
2541 for (i = 0; i < nb_output_streams; i++) {
2542 OutputStream *ost = output_streams[i];
2544 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2545 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2548 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* AVSubtitle owns heap-allocated rects; always release them. */
2553 avsubtitle_free(&subtitle);
/* Signal EOF on every filtergraph input fed by this input stream,
 * stamping it with the stream's current pts converted back into the
 * stream time base. */
2557 static int send_filter_eof(InputStream *ist)
2560 /* TODO keep pts also in stream time base to avoid converting back */
/* ist->pts is kept in AV_TIME_BASE_Q; PASS_MINMAX preserves AV_NOPTS_VALUE. */
2561 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2562 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2564 for (i = 0; i < ist->nb_filters; i++) {
2565 ret = ifilter_send_eof(ist->filters[i], pts);
/* Core per-packet driver for one input stream: seeds first timestamps,
 * runs the decode loop (audio/video/subtitle) when the stream is decoded,
 * propagates EOF to attached filters, and maintains dts/pts prediction
 * for streamcopy. Returns non-zero while more input is expected, 0 once
 * EOF has been fully processed. */
2572 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2573 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2577 int eof_reached = 0;
/* First packet for this stream: derive an initial dts. The negative
 * offset accounts for decoder reordering delay (has_b_frames frames). */
2580 if (!ist->saw_first_ts) {
2581 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2583 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2584 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2585 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2587 ist->saw_first_ts = 1;
/* Bootstrap the predicted next timestamps from the current ones. */
2590 if (ist->next_dts == AV_NOPTS_VALUE)
2591 ist->next_dts = ist->dts;
2592 if (ist->next_pts == AV_NOPTS_VALUE)
2593 ist->next_pts = ist->pts;
2597 av_init_packet(&avpkt);
/* A packet dts from the demuxer overrides our prediction (converted to
 * AV_TIME_BASE_Q). For video being decoded, pts is left to the decoder. */
2604 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2605 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2606 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2607 ist->next_pts = ist->pts = ist->dts;
2610 // while we have more to decode or while the decoder did output something on EOF
2611 while (ist->decoding_needed) {
2612 int64_t duration_dts = 0;
2613 int64_t duration_pts = 0;
2615 int decode_failed = 0;
2617 ist->pts = ist->next_pts;
2618 ist->dts = ist->next_dts;
/* Dispatch by media type; `repeating` passes NULL to drain buffered
 * decoder output without resubmitting the same packet. */
2620 switch (ist->dec_ctx->codec_type) {
2621 case AVMEDIA_TYPE_AUDIO:
2622 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2625 case AVMEDIA_TYPE_VIDEO:
2626 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
/* Estimate the frame duration: prefer the packet's own duration, else
 * derive it from the codec framerate and repeat_pict/ticks_per_frame. */
2628 if (!repeating || !pkt || got_output) {
2629 if (pkt && pkt->duration) {
2630 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2631 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2632 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2633 duration_dts = ((int64_t)AV_TIME_BASE *
2634 ist->dec_ctx->framerate.den * ticks) /
2635 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
/* Without a usable duration the dts prediction becomes unknown. */
2638 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2639 ist->next_dts += duration_dts;
2641 ist->next_dts = AV_NOPTS_VALUE;
/* pts advances by the decoded frame's duration when available,
 * otherwise by the dts-side estimate. */
2645 if (duration_pts > 0) {
2646 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2648 ist->next_pts += duration_dts;
2652 case AVMEDIA_TYPE_SUBTITLE:
2655 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2656 if (!pkt && ret >= 0)
/* AVERROR_EOF from the decoder marks the end of this stream's data. */
2663 if (ret == AVERROR_EOF) {
/* Distinguish decoder failures from failures while processing the
 * already-decoded data (e.g. in the filter path). */
2669 if (decode_failed) {
2670 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2671 ist->file_index, ist->st->index, av_err2str(ret));
2673 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2674 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2676 if (!decode_failed || exit_on_error)
2682 ist->got_output = 1;
2687 // During draining, we might get multiple output frames in this loop.
2688 // ffmpeg.c does not drain the filter chain on configuration changes,
2689 // which means if we send multiple frames at once to the filters, and
2690 // one of those frames changes configuration, the buffered frames will
2691 // be lost. This can upset certain FATE tests.
2692 // Decode only 1 frame per call on EOF to appease these FATE tests.
2693 // The ideal solution would be to rewrite decoding to use the new
2694 // decoding API in a better way.
2701 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2702 /* except when looping we need to flush but not to send an EOF */
2703 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2704 int ret = send_filter_eof(ist);
2706 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2711 /* handle stream copy */
/* For copied (non-decoded) streams, advance the dts prediction by the
 * nominal frame/sample duration so muxing timestamps stay monotonic. */
2712 if (!ist->decoding_needed && pkt) {
2713 ist->dts = ist->next_dts;
2714 switch (ist->dec_ctx->codec_type) {
2715 case AVMEDIA_TYPE_AUDIO:
2716 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2717 ist->dec_ctx->sample_rate;
2719 case AVMEDIA_TYPE_VIDEO:
2720 if (ist->framerate.num) {
2721 // TODO: Remove work-around for c99-to-c89 issue 7
2722 AVRational time_base_q = AV_TIME_BASE_Q;
2723 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2724 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2725 } else if (pkt->duration) {
2726 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2727 } else if(ist->dec_ctx->framerate.num != 0) {
2728 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2729 ist->next_dts += ((int64_t)AV_TIME_BASE *
2730 ist->dec_ctx->framerate.den * ticks) /
2731 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2735 ist->pts = ist->dts;
2736 ist->next_pts = ist->next_dts;
/* Pass the packet through to every streamcopy output it qualifies for. */
2738 for (i = 0; i < nb_output_streams; i++) {
2739 OutputStream *ost = output_streams[i];
2741 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2744 do_streamcopy(ist, ost, pkt);
2747 return !eof_reached;
/* Generate an SDP description covering all RTP output files and emit it
 * either to stdout or to the file given with -sdp_file. Bails out until
 * every output file has written its header. */
2750 static void print_sdp(void)
2755 AVIOContext *sdp_pb;
2756 AVFormatContext **avc;
/* Wait until all muxers are initialized; codec parameters may change
 * before the header is written. */
2758 for (i = 0; i < nb_output_files; i++) {
2759 if (!output_files[i]->header_written)
2763 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP-muxed outputs; j counts how many were found. */
2766 for (i = 0, j = 0; i < nb_output_files; i++) {
2767 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2768 avc[j] = output_files[i]->ctx;
2776 av_sdp_create(avc, j, sdp, sizeof(sdp));
2778 if (!sdp_filename) {
2779 printf("SDP:\n%s\n", sdp);
2782 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2783 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2785 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2786 avio_closep(&sdp_pb);
/* Written once; clearing the name prevents rewriting the file later. */
2787 av_freep(&sdp_filename);
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick one that matches the hwaccel requested for this input
 * stream, initializing the hwaccel on the way. Returns the chosen format
 * (software fallback handling is outside this visible span). */
2795 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2797 InputStream *ist = s->opaque;
2798 const enum AVPixelFormat *p;
/* The list is terminated by AV_PIX_FMT_NONE and ordered by decoder
 * preference. */
2801 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2802 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2803 const AVCodecHWConfig *config = NULL;
/* Non-hwaccel formats are not considered here. */
2806 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Generic/auto hwaccel: probe the decoder's hw configs for one that
 * supports a device context and offers this pixel format. */
2809 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2810 ist->hwaccel_id == HWACCEL_AUTO) {
2812 config = avcodec_get_hw_config(s->codec, i);
2815 if (!(config->methods &
2816 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2818 if (config->pix_fmt == *p)
/* A specific device type was requested; skip mismatches. */
2823 if (config->device_type != ist->hwaccel_device_type) {
2824 // Different hwaccel offered, ignore.
2828 ret = hwaccel_decode_init(s);
/* An explicitly requested hwaccel that cannot initialize is fatal;
 * AUTO presumably falls through to the next candidate instead. */
2830 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2831 av_log(NULL, AV_LOG_FATAL,
2832 "%s hwaccel requested for input stream #%d:%d, "
2833 "but cannot be initialized.\n",
2834 av_hwdevice_get_type_name(config->device_type),
2835 ist->file_index, ist->st->index);
2836 return AV_PIX_FMT_NONE;
/* Legacy path: look this pixel format up in the static hwaccels[]
 * table and initialize via its init() callback. */
2841 const HWAccel *hwaccel = NULL;
2843 for (i = 0; hwaccels[i].name; i++) {
2844 if (hwaccels[i].pix_fmt == *p) {
2845 hwaccel = &hwaccels[i];
2850 // No hwaccel supporting this pixfmt.
2853 if (hwaccel->id != ist->hwaccel_id) {
2854 // Does not match requested hwaccel.
2858 ret = hwaccel->init(s);
2860 av_log(NULL, AV_LOG_FATAL,
2861 "%s hwaccel requested for input stream #%d:%d, "
2862 "but cannot be initialized.\n", hwaccel->name,
2863 ist->file_index, ist->st->index);
2864 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context to the decoder. */
2868 if (ist->hw_frames_ctx) {
2869 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2870 if (!s->hw_frames_ctx)
2871 return AV_PIX_FMT_NONE;
/* Remember which format the hwaccel produces; get_buffer() keys on it. */
2874 ist->hwaccel_pix_fmt = *p;
2881 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2883 InputStream *ist = s->opaque;
2885 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2886 return ist->hwaccel_get_buffer(s, frame, flags);
2888 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder (when decoding is needed) for one input stream:
 * install the get_format/get_buffer callbacks, apply stream-specific
 * decoder options, set up any hw device, and call avcodec_open2().
 * On failure, writes a human-readable message into `error`. */
2891 static int init_input_stream(int ist_index, char *error, int error_len)
2894 InputStream *ist = input_streams[ist_index];
2896 if (ist->decoding_needed) {
2897 AVCodec *codec = ist->dec;
2899 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2900 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2901 return AVERROR(EINVAL);
/* opaque lets the callbacks recover the InputStream from the codec ctx. */
2904 ist->dec_ctx->opaque = ist;
2905 ist->dec_ctx->get_format = get_format;
2906 ist->dec_ctx->get_buffer2 = get_buffer;
2907 ist->dec_ctx->thread_safe_callbacks = 1;
2909 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles sent to an output need end display times computed. */
2910 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2911 (ist->decoding_needed & DECODING_FOR_OST)) {
2912 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2913 if (ist->decoding_needed & DECODING_FOR_FILTER)
2914 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2917 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2919 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2920 * audio, and video decoders such as cuvid or mediacodec */
2921 ist->dec_ctx->pkt_timebase = ist->st->time_base;
/* Default to automatic threading unless the user chose a thread count. */
2923 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2924 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2925 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2926 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2927 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2929 ret = hw_device_setup_for_decode(ist);
2931 snprintf(error, error_len, "Device setup failed for "
2932 "decoder on input stream #%d:%d : %s",
2933 ist->file_index, ist->st->index, av_err2str(ret));
2937 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2938 if (ret == AVERROR_EXPERIMENTAL)
2939 abort_codec_experimental(codec, 0);
2941 snprintf(error, error_len,
2942 "Error while opening decoder for input stream "
2944 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed — report it. */
2947 assert_avoptions(ist->decoder_opts);
/* Timestamp prediction starts unknown until the first packet arrives. */
2950 ist->next_pts = AV_NOPTS_VALUE;
2951 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream that directly feeds this output stream, i.e.
 * when the output maps a concrete source stream (source_index >= 0);
 * otherwise — e.g. pure lavfi sources — no input stream applies. */
2956 static InputStream *get_input_stream(OutputStream *ost)
2958 if (ost->source_index >= 0)
2959 return input_streams[ost->source_index];
/* qsort()-compatible three-way comparison of two int64_t values:
 * negative / zero / positive for a < b, a == b, a > b. The subtraction
 * of boolean comparisons avoids int64 overflow that `*a - *b` could hit. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
/* Write the muxer header for an output file once every one of its
 * streams has been initialized, dump the format, trigger SDP printing
 * if requested, and flush packets buffered in the per-stream muxing
 * queues while the muxer was not yet ready. */
2968 /* open the muxer when all the streams are initialized */
2969 static int check_init_output_file(OutputFile *of, int file_index)
/* Bail out (presumably returning 0) while any stream is still pending. */
2973 for (i = 0; i < of->ctx->nb_streams; i++) {
2974 OutputStream *ost = output_streams[of->ost_index + i];
2975 if (!ost->initialized)
2979 of->ctx->interrupt_callback = int_cb;
2981 ret = avformat_write_header(of->ctx, &of->opts);
2983 av_log(NULL, AV_LOG_ERROR,
2984 "Could not write header for output file #%d "
2985 "(incorrect codec parameters ?): %s\n",
2986 file_index, av_err2str(ret));
2989 //assert_avoptions(of->opts);
2990 of->header_written = 1;
2992 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
/* SDP can only be produced after all headers exist; see print_sdp(). */
2994 if (sdp_filename || want_sdp)
2997 /* flush the muxing queues */
2998 for (i = 0; i < of->ctx->nb_streams; i++) {
2999 OutputStream *ost = output_streams[of->ost_index + i];
3001 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3002 if (!av_fifo_size(ost->muxing_queue))
3003 ost->mux_timebase = ost->st->time_base;
3005 while (av_fifo_size(ost->muxing_queue)) {
3007 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3008 write_packet(of, &pkt, ost, 1);
/* Initialize the chain of bitstream filters attached to an output
 * stream: thread codec parameters and time base through the chain, then
 * copy the final filter's output parameters back onto the stream. */
3015 static int init_output_bsfs(OutputStream *ost)
3020 if (!ost->nb_bitstream_filters)
3023 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3024 ctx = ost->bsf_ctx[i];
/* First filter takes the stream's codecpar; each later filter takes the
 * previous filter's output parameters. */
3026 ret = avcodec_parameters_copy(ctx->par_in,
3027 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3031 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3033 ret = av_bsf_init(ctx);
3035 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3036 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
3041 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3042 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3046 ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in streamcopy mode: derive the output
 * codec parameters from the input stream (with user option overrides),
 * pick a codec tag valid for the target container, and copy timing,
 * disposition, side data, and per-type fixups from input to output. */
3051 static int init_output_stream_streamcopy(OutputStream *ost)
3053 OutputFile *of = output_files[ost->file_index];
3054 InputStream *ist = get_input_stream(ost);
3055 AVCodecParameters *par_dst = ost->st->codecpar;
3056 AVCodecParameters *par_src = ost->ref_par;
3059 uint32_t codec_tag = par_dst->codec_tag;
/* Streamcopy must have a source stream and no filtergraph. */
3061 av_assert0(ist && !ost->filter);
/* Route the input codecpar through enc_ctx so -c:v copy with user codec
 * options (e.g. -tag) still applies them. */
3063 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3065 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3067 av_log(NULL, AV_LOG_FATAL,
3068 "Error setting up codec context options.\n");
3071 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the target container accepts it
 * (or cannot suggest an alternative). */
3074 unsigned int codec_tag_tmp;
3075 if (!of->ctx->oformat->codec_tag ||
3076 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3077 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3078 codec_tag = par_src->codec_tag;
3081 ret = avcodec_parameters_copy(par_dst, par_src);
3085 par_dst->codec_tag = codec_tag;
3087 if (!ost->frame_rate.num)
3088 ost->frame_rate = ist->framerate;
3089 ost->st->avg_frame_rate = ost->frame_rate;
3091 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3095 // copy timebase while removing common factors
3096 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3097 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3099 // copy estimated duration as a hint to the muxer
3100 if (ost->st->duration <= 0 && ist->st->duration > 0)
3101 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3104 ost->st->disposition = ist->st->disposition;
/* Copy all stream-level packet side data verbatim. */
3106 if (ist->st->nb_side_data) {
3107 for (i = 0; i < ist->st->nb_side_data; i++) {
3108 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3111 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3113 return AVERROR(ENOMEM);
3114 memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-specified rotation replaces the display matrix side data;
 * the angle is negated for the display-matrix convention. */
3118 if (ost->rotate_overridden) {
3119 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3120 sizeof(int32_t) * 9);
3122 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3125 ost->parser = av_parser_init(par_dst->codec_id);
3126 ost->parser_avctx = avcodec_alloc_context3(NULL);
3127 if (!ost->parser_avctx)
3128 return AVERROR(ENOMEM);
3130 switch (par_dst->codec_type) {
3131 case AVMEDIA_TYPE_AUDIO:
3132 if (audio_volume != 256) {
3133 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* MP3/AC-3 block_align values from some demuxers are bogus; clear them. */
3136 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3137 par_dst->block_align= 0;
3138 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3139 par_dst->block_align= 0;
3141 case AVMEDIA_TYPE_VIDEO:
3142 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3144 av_mul_q(ost->frame_aspect_ratio,
3145 (AVRational){ par_dst->height, par_dst->width });
3146 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3147 "with stream copy may produce invalid files\n");
3149 else if (ist->st->sample_aspect_ratio.num)
3150 sar = ist->st->sample_aspect_ratio;
3152 sar = par_src->sample_aspect_ratio;
3153 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3154 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3155 ost->st->r_frame_rate = ist->st->r_frame_rate;
3159 ost->mux_timebase = ist->st->time_base;
/* Tag the output stream's metadata with an "encoder" string identifying
 * libavcodec and the encoder name — unless the user already set one, or
 * bitexact mode is active (then only the short "Lavc <name>" form is
 * used so output stays reproducible). */
3164 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3166 AVDictionaryEntry *e;
3168 uint8_t *encoder_string;
3169 int encoder_string_len;
3170 int format_flags = 0;
3171 int codec_flags = ost->enc_ctx->flags;
/* Respect a user-provided encoder tag. */
3173 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate fflags/flags option strings to detect +bitexact requests that
 * have not been applied to the contexts yet. */
3176 e = av_dict_get(of->opts, "fflags", NULL, 0);
3178 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3181 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3183 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3185 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3188 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* "+2": one space separator plus the NUL terminator. */
3191 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3192 encoder_string = av_mallocz(encoder_string_len);
3193 if (!encoder_string)
3196 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3197 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3199 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3200 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dict. */
3201 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3202 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Parse the -force_key_frames value: a comma-separated list of times
 * and/or "chapters[+offset]" entries. Produces a sorted array of forced
 * keyframe timestamps (in the encoder time base) in ost->forced_kf_pts. */
3205 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3206 AVCodecContext *avctx)
3209 int n = 1, i, size, index = 0;
/* n counts the comma-separated entries (commas + 1; counting loop body
 * is not visible in this span). */
3212 for (p = kf; *p; p++)
3216 pts = av_malloc_array(size, sizeof(*pts));
3218 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3223 for (i = 0; i < n; i++) {
3224 char *next = strchr(p, ',');
/* "chapters" entry: expand to one keyframe per chapter start, with an
 * optional time offset after the keyword (e.g. "chapters-0.1"). */
3229 if (!memcmp(p, "chapters", 8)) {
3231 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1): the entry itself already
 * reserved one slot; guard the size addition against overflow. */
3234 if (avf->nb_chapters > INT_MAX - size ||
3235 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3237 av_log(NULL, AV_LOG_FATAL,
3238 "Could not allocate forced key frames array.\n");
3241 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3242 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3244 for (j = 0; j < avf->nb_chapters; j++) {
3245 AVChapter *c = avf->chapters[j];
3246 av_assert1(index < size);
3247 pts[index++] = av_rescale_q(c->start, c->time_base,
3248 avctx->time_base) + t;
/* Plain timestamp entry. */
3253 t = parse_time_or_die("force_key_frames", p, 1);
3254 av_assert1(index < size);
3255 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keyframe forcing logic expects the timestamps in ascending order. */
3262 av_assert0(index == size);
3263 qsort(pts, size, sizeof(*pts), compare_int64);
3264 ost->forced_kf_count = size;
3265 ost->forced_kf_pts = pts;
/* Choose the encoder time base for an output stream:
 *   -enc_time_base <q>  (num > 0): use the user-specified time base;
 *   -enc_time_base -1   (num < 0): mirror the input stream's time base
 *                                  (falls back with a warning when no
 *                                  input stream is available);
 *   otherwise: use the caller-provided per-media-type default. */
3268 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3270 InputStream *ist = get_input_stream(ost);
3271 AVCodecContext *enc_ctx = ost->enc_ctx;
3272 AVFormatContext *oc;
3274 if (ost->enc_timebase.num > 0) {
3275 enc_ctx->time_base = ost->enc_timebase;
3279 if (ost->enc_timebase.num < 0) {
3281 enc_ctx->time_base = ist->st->time_base;
3285 oc = output_files[ost->file_index]->ctx;
3286 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3289 enc_ctx->time_base = default_time_base;
/* Configure the encoder context for an output stream that is being
 * transcoded: disposition, frame rate selection, per-media-type encoder
 * parameters pulled from the filtergraph sink (format, dimensions,
 * sample rate, channel layout, time base), and forced-keyframe setup. */
3292 static int init_output_stream_encode(OutputStream *ost)
3294 InputStream *ist = get_input_stream(ost);
3295 AVCodecContext *enc_ctx = ost->enc_ctx;
3296 AVCodecContext *dec_ctx = NULL;
3297 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3300 set_encoder_id(output_files[ost->file_index], ost);
3302 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3303 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3304 // which have to be filtered out to prevent leaking them to output files.
3305 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3308 ost->st->disposition = ist->st->disposition;
3310 dec_ctx = ist->dec_ctx;
3312 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the file, mark audio/
 * video streams as the default. */
3314 for (j = 0; j < oc->nb_streams; j++) {
3315 AVStream *st = oc->streams[j];
3316 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3319 if (j == oc->nb_streams)
3320 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3321 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3322 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate priority: -r option > filter sink > input -r > input
 * r_frame_rate > hard 25 fps fallback. */
3325 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3326 if (!ost->frame_rate.num)
3327 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3328 if (ist && !ost->frame_rate.num)
3329 ost->frame_rate = ist->framerate;
3330 if (ist && !ost->frame_rate.num)
3331 ost->frame_rate = ist->st->r_frame_rate;
3332 if (ist && !ost->frame_rate.num) {
3333 ost->frame_rate = (AVRational){25, 1};
3334 av_log(NULL, AV_LOG_WARNING,
3336 "about the input framerate is available. Falling "
3337 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3338 "if you want a different framerate.\n",
3339 ost->file_index, ost->index);
3341 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest frame rate the encoder supports, unless -force_fps. */
3342 if (ost->enc->supported_framerates && !ost->force_fps) {
3343 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3344 ost->frame_rate = ost->enc->supported_framerates[idx];
3346 // reduce frame rate for mpeg4 to be within the spec limits
3347 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3348 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3349 ost->frame_rate.num, ost->frame_rate.den, 65535);
3353 switch (enc_ctx->codec_type) {
/* Audio: parameters come from the buffersink feeding the encoder. */
3354 case AVMEDIA_TYPE_AUDIO:
3355 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3357 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3358 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3359 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3360 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3361 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3363 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3366 case AVMEDIA_TYPE_VIDEO:
/* Default video time base: inverse of the chosen frame rate. */
3367 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3369 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3370 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3371 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3372 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3373 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3374 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframes were parsed in AV_TIME_BASE_Q; rescale now that the
 * encoder time base is final. */
3376 for (j = 0; j < ost->forced_kf_count; j++)
3377 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3379 enc_ctx->time_base);
3381 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3382 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3383 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3384 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3385 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3386 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3388 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3390 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3391 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3393 enc_ctx->framerate = ost->frame_rate;
3395 ost->st->avg_frame_rate = ost->frame_rate;
/* When geometry/format changed relative to the input, the inherited
 * bits_per_raw_sample no longer applies; use the CLI value instead. */
3398 enc_ctx->width != dec_ctx->width ||
3399 enc_ctx->height != dec_ctx->height ||
3400 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3401 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (handled elsewhere), or a static list of timestamps. */
3404 if (ost->forced_keyframes) {
3405 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3406 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3407 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3409 av_log(NULL, AV_LOG_ERROR,
3410 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3413 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3414 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3415 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3416 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3418 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3419 // parse it only for static kf timings
3420 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3421 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3425 case AVMEDIA_TYPE_SUBTITLE:
3426 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Subtitle encoders may need a canvas size; inherit the input's. */
3427 if (!enc_ctx->width) {
3428 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3429 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3432 case AVMEDIA_TYPE_DATA:
3439 ost->mux_timebase = enc_ctx->time_base;
3444 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3448 if (ost->encoding_needed) {
3449 AVCodec *codec = ost->enc;
3450 AVCodecContext *dec = NULL;
3453 ret = init_output_stream_encode(ost);
3457 if ((ist = get_input_stream(ost)))
3459 if (dec && dec->subtitle_header) {
3460 /* ASS code assumes this buffer is null terminated so add extra byte. */
3461 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3462 if (!ost->enc_ctx->subtitle_header)
3463 return AVERROR(ENOMEM);
3464 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3465 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3467 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3468 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3469 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3471 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3472 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3473 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3475 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3476 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3477 av_buffersink_get_format(ost->filter->filter)) {
3478 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3479 if (!ost->enc_ctx->hw_frames_ctx)
3480 return AVERROR(ENOMEM);
3482 ret = hw_device_setup_for_encode(ost);
3484 snprintf(error, error_len, "Device setup failed for "
3485 "encoder on output stream #%d:%d : %s",
3486 ost->file_index, ost->index, av_err2str(ret));
3491 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3492 if (ret == AVERROR_EXPERIMENTAL)
3493 abort_codec_experimental(codec, 1);
3494 snprintf(error, error_len,
3495 "Error while opening encoder for output stream #%d:%d - "
3496 "maybe incorrect parameters such as bit_rate, rate, width or height",
3497 ost->file_index, ost->index);
3500 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3501 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3502 av_buffersink_set_frame_size(ost->filter->filter,
3503 ost->enc_ctx->frame_size);
3504 assert_avoptions(ost->encoder_opts);
3505 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3506 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3507 " It takes bits/s as argument, not kbits/s\n");
3509 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3511 av_log(NULL, AV_LOG_FATAL,
3512 "Error initializing the output stream codec context.\n");
3516 * FIXME: ost->st->codec should't be needed here anymore.
3518 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3522 if (ost->enc_ctx->nb_coded_side_data) {
3525 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3526 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3529 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3531 return AVERROR(ENOMEM);
3532 memcpy(dst_data, sd_src->data, sd_src->size);
3537 * Add global input side data. For now this is naive, and copies it
3538 * from the input stream's global side data. All side data should
3539 * really be funneled over AVFrame and libavfilter, then added back to
3540 * packet side data, and then potentially using the first packet for
3545 for (i = 0; i < ist->st->nb_side_data; i++) {
3546 AVPacketSideData *sd = &ist->st->side_data[i];
3547 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3549 return AVERROR(ENOMEM);
3550 memcpy(dst, sd->data, sd->size);
3551 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3552 av_display_rotation_set((uint32_t *)dst, 0);
3556 // copy timebase while removing common factors
3557 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3558 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3560 // copy estimated duration as a hint to the muxer
3561 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3562 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3564 ost->st->codec->codec= ost->enc_ctx->codec;
3565 } else if (ost->stream_copy) {
3566 ret = init_output_stream_streamcopy(ost);
3571 * FIXME: will the codec context used by the parser during streamcopy
3572 * This should go away with the new parser API.
3574 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3579 // parse user provided disposition, and update stream values
3580 if (ost->disposition) {
3581 static const AVOption opts[] = {
3582 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3583 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3584 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3585 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3586 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3587 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3588 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3589 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3590 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3591 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3592 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3593 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3594 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3595 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3598 static const AVClass class = {
3600 .item_name = av_default_item_name,
3602 .version = LIBAVUTIL_VERSION_INT,
3604 const AVClass *pclass = &class;
3606 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3611 /* initialize bitstream filters for the output stream
3612 * needs to be done here, because the codec id for streamcopy is not
3613 * known until now */
3614 ret = init_output_bsfs(ost);
3618 ost->initialized = 1;
3620 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn once per stream index when a packet references an input stream that
 * appeared after initial probing; such late streams are ignored by the mapper.
 * NOTE(review): elided listing — braces and some original lines are missing. */
3627 static void report_new_stream(int input_index, AVPacket *pkt)
3629 InputFile *file = input_files[input_index];
3630 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) index -> stay quiet. */
3632 if (pkt->stream_index < file->nb_streams_warn)
3634 av_log(file->ctx, AV_LOG_WARNING,
3635 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3636 av_get_media_type_string(st->codecpar->codec_type),
3637 input_index, pkt->stream_index,
3638 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Record the highest warned index so each new stream warns exactly once. */
3639 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode main loop: bind complex-filtergraph
 * outputs to a source stream, init input streams, open encoders for outputs
 * not fed by filtergraphs, discard unused programs, write stream-less file
 * headers, and dump the stream mapping. Finally publishes completion through
 * the atomic transcode_init_done flag.
 * NOTE(review): elided listing — many original lines (braces, continue/return
 * statements, error paths) are missing from this view. */
3642 static int transcode_init(void)
3644 int ret = 0, i, j, k;
3645 AVFormatContext *oc;
3648 char error[1024] = {0};
/* Associate a source input stream with every complex-graph output whose
 * ost has no source yet; only done for single-input graphs. */
3650 for (i = 0; i < nb_filtergraphs; i++) {
3651 FilterGraph *fg = filtergraphs[i];
3652 for (j = 0; j < fg->nb_outputs; j++) {
3653 OutputFilter *ofilter = fg->outputs[j];
3654 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3656 if (fg->nb_inputs != 1)
3658 for (k = nb_input_streams-1; k >= 0 ; k--)
3659 if (fg->inputs[0]->ist == input_streams[k])
3661 ofilter->ost->source_index = k;
3665 /* init framerate emulation */
3666 for (i = 0; i < nb_input_files; i++) {
3667 InputFile *ifile = input_files[i];
3668 if (ifile->rate_emu)
3669 for (j = 0; j < ifile->nb_streams; j++)
3670 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3673 /* init input streams */
3674 for (i = 0; i < nb_input_streams; i++)
3675 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts already opened before bailing out. */
3676 for (i = 0; i < nb_output_streams; i++) {
3677 ost = output_streams[i];
3678 avcodec_close(ost->enc_ctx);
3683 /* open each encoder */
3684 for (i = 0; i < nb_output_streams; i++) {
3685 // skip streams fed from filtergraphs until we have a frame for them
3686 if (output_streams[i]->filter)
3689 ret = init_output_stream(output_streams[i], error, sizeof(error));
3694 /* discard unused programs */
3695 for (i = 0; i < nb_input_files; i++) {
3696 InputFile *ifile = input_files[i];
3697 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3698 AVProgram *p = ifile->ctx->programs[j];
/* Default to discarding the whole program; keep it if any member stream
 * is actually used. */
3699 int discard = AVDISCARD_ALL;
3701 for (k = 0; k < p->nb_stream_indexes; k++)
3702 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3703 discard = AVDISCARD_DEFAULT;
3706 p->discard = discard;
3710 /* write headers for files with no streams */
3711 for (i = 0; i < nb_output_files; i++) {
3712 oc = output_files[i]->ctx;
3713 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3714 ret = check_init_output_file(output_files[i], i);
3721 /* dump the stream mapping */
3722 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3723 for (i = 0; i < nb_input_streams; i++) {
3724 ist = input_streams[i];
3726 for (j = 0; j < ist->nb_filters; j++) {
3727 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3728 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3729 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3730 ist->filters[j]->name);
3731 if (nb_filtergraphs > 1)
3732 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3733 av_log(NULL, AV_LOG_INFO, "\n");
3738 for (i = 0; i < nb_output_streams; i++) {
3739 ost = output_streams[i];
3741 if (ost->attachment_filename) {
3742 /* an attached file */
3743 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3744 ost->attachment_filename, ost->file_index, ost->index);
3748 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3749 /* output from a complex graph */
3750 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3751 if (nb_filtergraphs > 1)
3752 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3754 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3755 ost->index, ost->enc ? ost->enc->name : "?");
3759 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3760 input_streams[ost->source_index]->file_index,
3761 input_streams[ost->source_index]->st->index,
3764 if (ost->sync_ist != input_streams[ost->source_index])
3765 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3766 ost->sync_ist->file_index,
3767 ost->sync_ist->st->index);
3768 if (ost->stream_copy)
3769 av_log(NULL, AV_LOG_INFO, " (copy)");
3771 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3772 const AVCodec *out_codec = ost->enc;
3773 const char *decoder_name = "?";
3774 const char *in_codec_name = "?";
3775 const char *encoder_name = "?";
3776 const char *out_codec_name = "?";
3777 const AVCodecDescriptor *desc;
/* Print "native" instead of repeating the codec name when the de/encoder
 * implementation name equals the codec descriptor name. */
3780 decoder_name = in_codec->name;
3781 desc = avcodec_descriptor_get(in_codec->id);
3783 in_codec_name = desc->name;
3784 if (!strcmp(decoder_name, in_codec_name))
3785 decoder_name = "native";
3789 encoder_name = out_codec->name;
3790 desc = avcodec_descriptor_get(out_codec->id);
3792 out_codec_name = desc->name;
3793 if (!strcmp(encoder_name, out_codec_name))
3794 encoder_name = "native";
3797 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3798 in_codec_name, decoder_name,
3799 out_codec_name, encoder_name);
3801 av_log(NULL, AV_LOG_INFO, "\n");
3805 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Signal other threads (e.g. signal handlers / progress report) that init
 * has finished; transcode_init_done is an atomic flag. */
3809 atomic_store(&transcode_init_done, 1);
3814 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Return 1 if at least one output stream still wants more data, 0 otherwise.
 * Streams that hit the file-size limit or their -frames cap are closed here.
 * NOTE(review): elided listing — continue/return lines are missing. */
3815 static int need_output(void)
3819 for (i = 0; i < nb_output_streams; i++) {
3820 OutputStream *ost = output_streams[i];
3821 OutputFile *of = output_files[ost->file_index];
3822 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Finished stream, or the muxed file already reached -fs limit_filesize. */
3824 if (ost->finished ||
3825 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3827 if (ost->frame_number >= ost->max_frames) {
3829 for (j = 0; j < of->ctx->nb_streams; j++)
3830 close_output_stream(output_streams[of->ost_index + j]);
3841 * Select the output stream to process.
3843 * @return selected output stream, or NULL if none available
/* Pick the output stream with the smallest muxed DTS (rescaled), i.e. the one
 * lagging furthest behind, so muxing stays balanced across streams.
 * Returns NULL when no stream is currently available.
 * NOTE(review): elided listing — the rescale target timebase line and the
 * final return are missing from this view. */
3845 static OutputStream *choose_output(void)
3848 int64_t opts_min = INT64_MAX;
3849 OutputStream *ost_min = NULL;
3851 for (i = 0; i < nb_output_streams; i++) {
3852 OutputStream *ost = output_streams[i];
/* Streams with no DTS yet sort first (INT64_MIN) so they get primed. */
3853 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3854 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3856 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3857 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3859 if (!ost->initialized && !ost->inputs_done)
3862 if (!ost->finished && opts < opts_min) {
/* A temporarily unavailable stream still blocks others (returns NULL). */
3864 ost_min = ost->unavailable ? NULL : ost;
/* Toggle terminal echo on stdin (fd 0) via termios; used around interactive
 * command entry. Silently does nothing if tcgetattr fails (not a tty).
 * NOTE(review): elided listing — the termios declaration / #if guard lines
 * are missing from this view. */
3870 static void set_tty_echo(int on)
3874 if (tcgetattr(0, &tty) == 0) {
3875 if (on) tty.c_lflag |= ECHO;
3876 else tty.c_lflag &= ~ECHO;
3877 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, not as daemon) and act on single-key
 * commands: verbosity (+/-), QP histogram (s), packet dumping (h, presumably),
 * filtergraph commands (c/C), debug modes (d/D), help (?). Returns
 * AVERROR_EXIT when the user asked to quit or a signal was received.
 * NOTE(review): elided listing — the read_key() call, 'q' handling and several
 * branches are missing from this view. */
3882 static int check_keyboard_interaction(int64_t cur_time)
3885 static int64_t last_time;
3886 if (received_nb_signals)
3887 return AVERROR_EXIT;
3888 /* read_key() returns 0 on EOF */
3889 if(cur_time - last_time >= 100000 && !run_as_daemon){
3891 last_time = cur_time;
3895 return AVERROR_EXIT;
3896 if (key == '+') av_log_set_level(av_log_get_level()+10);
3897 if (key == '-') av_log_set_level(av_log_get_level()-10);
3898 if (key == 's') qp_hist ^= 1;
3901 do_hex_dump = do_pkt_dump = 0;
3902 } else if(do_pkt_dump){
3906 av_log_set_level(AV_LOG_DEBUG);
/* Interactive filtergraph command: 'c' targets the first matching filter,
 * 'C' sends/queues to all matching filters. */
3908 if (key == 'c' || key == 'C'){
3909 char buf[4096], target[64], command[256], arg[256] = {0};
3912 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3915 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3920 fprintf(stderr, "\n");
3922 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3923 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3924 target, time, command, arg);
3925 for (i = 0; i < nb_filtergraphs; i++) {
3926 FilterGraph *fg = filtergraphs[i];
3929 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3930 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3931 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3932 } else if (key == 'c') {
3933 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3934 ret = AVERROR_PATCHWELCOME;
3936 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3938 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3943 av_log(NULL, AV_LOG_ERROR,
3944 "Parse error, at least 3 arguments were expected, "
3945 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles debug modes (shifting the current mask); 'D' presumably prompts
 * for an explicit value — TODO confirm against the full source. */
3948 if (key == 'd' || key == 'D'){
3951 debug = input_streams[0]->st->codec->debug<<1;
3952 if(!debug) debug = 1;
3953 while(debug & (FF_DEBUG_DCT_COEFF
3955 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3957 )) //unsupported, would just crash
3964 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3969 fprintf(stderr, "\n");
3970 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3971 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug mask to all decoder and encoder contexts. */
3973 for(i=0;i<nb_input_streams;i++) {
3974 input_streams[i]->st->codec->debug = debug;
3976 for(i=0;i<nb_output_streams;i++) {
3977 OutputStream *ost = output_streams[i];
3978 ost->enc_ctx->debug = debug;
3980 if(debug) av_log_set_level(AV_LOG_DEBUG);
3981 fprintf(stderr,"debug=%d\n", debug);
3984 fprintf(stderr, "key function\n"
3985 "? show this help\n"
3986 "+ increase verbosity\n"
3987 "- decrease verbosity\n"
3988 "c Send command to first matching filter supporting it\n"
3989 "C Send/Queue command to all matching filters\n"
3990 "D cycle through available debug modes\n"
3991 "h dump packets/hex press to cycle through the 3 states\n"
3993 "s Show QP histogram\n"
/* Per-input-file reader thread: pulls packets with av_read_frame and forwards
 * them to the main thread through the file's thread message queue. On a
 * would-block send (non-blocking queue), warns once that thread_queue_size
 * may be too small, then retries blocking (presumably — the retry path is
 * partially elided). On error/EOF the error is propagated to the receiver via
 * av_thread_message_queue_set_err_recv.
 * NOTE(review): elided listing — loop construct, EAGAIN sleep/continue and the
 * thread exit are missing from this view. */
4000 static void *input_thread(void *arg)
4003 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4008 ret = av_read_frame(f->ctx, &pkt);
4010 if (ret == AVERROR(EAGAIN)) {
4015 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4018 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4019 if (flags && ret == AVERROR(EAGAIN)) {
4021 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4022 av_log(f->ctx, AV_LOG_WARNING,
4023 "Thread message queue blocking; consider raising the "
4024 "thread_queue_size option (current value: %d)\n",
4025 f->thread_queue_size);
/* Send failed for good: drop the packet and tell the receiver why. */
4028 if (ret != AVERROR_EOF)
4029 av_log(f->ctx, AV_LOG_ERROR,
4030 "Unable to send packet to main thread: %s\n",
4032 av_packet_unref(&pkt);
4033 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reap all input reader threads: signal EOF to senders, drain
 * and unref any queued packets, join each thread, then free its queue.
 * Safe to call when threads were never started (NULL queue check). */
4041 static void free_input_threads(void)
4045 for (i = 0; i < nb_input_files; i++) {
4046 InputFile *f = input_files[i];
4049 if (!f || !f->in_thread_queue)
/* Make the reader thread's next send fail so it exits its loop. */
4051 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain leftover packets so queued AVPackets are not leaked. */
4052 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4053 av_packet_unref(&pkt);
4055 pthread_join(f->thread, NULL);
4057 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one reader thread per input file. Skipped entirely for a single input
 * (reading inline is cheaper). Non-seekable inputs — except the lavfi pseudo-
 * demuxer — use non-blocking queue sends so one slow input cannot stall the
 * others. Returns 0 or a negative AVERROR. */
4061 static int init_input_threads(void)
4065 if (nb_input_files == 1)
4068 for (i = 0; i < nb_input_files; i++) {
4069 InputFile *f = input_files[i];
4071 if (f->ctx->pb ? !f->ctx->pb->seekable :
4072 strcmp(f->ctx->iformat->name, "lavfi"))
4073 f->non_blocking = 1;
4074 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4075 f->thread_queue_size, sizeof(AVPacket));
4079 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4080 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
/* pthread_create returns a positive errno, hence AVERROR(ret); free the
 * queue allocated just above to avoid a leak on this error path. */
4081 av_thread_message_queue_free(&f->in_thread_queue);
4082 return AVERROR(ret);
/* Threaded variant of get_input_packet: receive the next packet from this
 * file's reader-thread queue, non-blocking when the file is marked so. */
4088 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4090 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4092 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. Under -re style rate emulation,
 * returns EAGAIN while any stream's DTS is still ahead of wall-clock time
 * elapsed since its start (throttling to realtime). Multi-input runs go
 * through the per-file reader-thread queue; single input reads directly.
 * NOTE(review): elided listing — the rate_emu guard and the pts>now comparison
 * line are missing from this view. */
4096 static int get_input_packet(InputFile *f, AVPacket *pkt)
4100 for (i = 0; i < f->nb_streams; i++) {
4101 InputStream *ist = input_streams[f->ist_index + i];
/* Convert the stream DTS to microseconds for comparison with av_gettime. */
4102 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4103 int64_t now = av_gettime_relative() - ist->start;
4105 return AVERROR(EAGAIN);
4110 if (nb_input_files > 1)
4111 return get_input_packet_mt(f, pkt);
4113 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN this iteration).
 * NOTE(review): elided listing — the return statements are missing. */
4116 static int got_eagain(void)
4119 for (i = 0; i < nb_output_streams; i++)
4120 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before the next
 * scheduling round of the main loop. */
4125 static void reset_eagain(void)
4128 for (i = 0; i < nb_input_files; i++)
4129 input_files[i]->eagain = 0;
4130 for (i = 0; i < nb_output_streams; i++)
4131 output_streams[i]->unavailable = 0;
4134 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) with *duration (in time_base) via
 * av_compare_ts and keeps the larger, returning the timebase of the winner.
 * NOTE(review): elided listing — the assignments to *duration are missing. */
4135 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4136 AVRational time_base)
4142 return tmp_time_base;
4145 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4148 return tmp_time_base;
/* For -stream_loop: seek the input back to its start, flush decoders, and
 * compute the file's total duration (max over streams) so timestamps of the
 * next iteration can be offset past the previous one.
 * NOTE(review): elided listing — several guards, the video duration branch
 * header, and the return are missing from this view. */
4154 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4157 AVCodecContext *avctx;
4158 int i, ret, has_audio = 0;
4159 int64_t duration = 0;
4161 ret = av_seek_frame(is, -1, is->start_time, 0);
4165 for (i = 0; i < ifile->nb_streams; i++) {
4166 ist = input_streams[ifile->ist_index + i];
4167 avctx = ist->dec_ctx;
/* Drain the decoder (flush packet) and reset its internal buffers before
 * feeding the looped data again. */
4170 if (ist->decoding_needed) {
4171 process_input_packet(ist, NULL, 1);
4172 avcodec_flush_buffers(avctx);
4175 /* duration is the length of the last frame in a stream
4176 * when audio stream is present we don't care about
4177 * last video frame length because it's not defined exactly */
4178 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4182 for (i = 0; i < ifile->nb_streams; i++) {
4183 ist = input_streams[ifile->ist_index + i];
4184 avctx = ist->dec_ctx;
/* Audio: last-frame length derived from decoded sample count. */
4187 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4188 AVRational sample_rate = {1, avctx->sample_rate};
4190 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (presumably, when no audio is present): one frame duration from the
 * forced or average frame rate — TODO confirm the elided branch condition. */
4195 if (ist->framerate.num) {
4196 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4197 } else if (ist->st->avg_frame_rate.num) {
4198 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4203 if (!ifile->duration)
4204 ifile->time_base = ist->st->time_base;
4205 /* the total duration of the stream, max_pts - min_pts is
4206 * the duration of the stream without the last frame */
4207 duration += ist->max_pts - ist->min_pts;
4208 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* A positive loop count is decremented each pass; <=0 loops forever. */
4212 if (ifile->loop > 0)
4220 * - 0 -- one packet was read and processed
4221 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4222 * this function should be called again
4223 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file. Handles -stream_loop
 * rewinds, EOF (flushing decoders and finishing non-lavfi outputs), dynamic
 * new-stream detection, timestamp wrap correction, stream-global side data
 * injection, ts_offset/ts_scale application, discontinuity fixing against
 * dts_delta_threshold / dts_error_threshold, and finally hands the packet to
 * process_input_packet.
 * Returns 0 on one processed packet, AVERROR(EAGAIN) when this file has
 * nothing right now, AVERROR_EOF when it should not be called again.
 * NOTE(review): elided listing — many guards, `continue`s, `discard_packet:`
 * label and closing braces are missing from this view. */
4225 static int process_input(int file_index)
4227 InputFile *ifile = input_files[file_index];
4228 AVFormatContext *is;
4236 ret = get_input_packet(ifile, &pkt);
4238 if (ret == AVERROR(EAGAIN)) {
/* Input exhausted but looping requested: rewind and try once more. */
4242 if (ret < 0 && ifile->loop) {
4243 ret = seek_to_start(ifile, is);
4245 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4247 ret = get_input_packet(ifile, &pkt);
4248 if (ret == AVERROR(EAGAIN)) {
4254 if (ret != AVERROR_EOF) {
4255 print_error(is->filename, ret);
/* EOF: flush each decoder and finish outputs that do not pass through
 * libavfilter (streamcopy and subtitle encoders). */
4260 for (i = 0; i < ifile->nb_streams; i++) {
4261 ist = input_streams[ifile->ist_index + i];
4262 if (ist->decoding_needed) {
4263 ret = process_input_packet(ist, NULL, 0);
4268 /* mark all outputs that don't go through lavfi as finished */
4269 for (j = 0; j < nb_output_streams; j++) {
4270 OutputStream *ost = output_streams[j];
4272 if (ost->source_index == ifile->ist_index + i &&
4273 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4274 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller re-evaluates remaining files. */
4278 ifile->eof_reached = 1;
4279 return AVERROR(EAGAIN);
4285 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4286 is->streams[pkt.stream_index]);
4288 /* the following test is needed in case new streams appear
4289 dynamically in stream : we ignore them */
4290 if (pkt.stream_index >= ifile->nb_streams) {
4291 report_new_stream(file_index, &pkt);
4292 goto discard_packet;
4295 ist = input_streams[ifile->ist_index + pkt.stream_index];
4297 ist->data_size += pkt.size;
4301 goto discard_packet;
4303 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4304 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4309 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4310 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4311 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4312 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4313 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4314 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4315 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4316 av_ts2str(input_files[ist->file_index]->ts_offset),
4317 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
4320 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4321 int64_t stime, stime2;
4322 // Correcting starttime based on the enabled streams
4323 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4324 // so we instead do it here as part of discontinuity handling
4325 if ( ist->next_dts == AV_NOPTS_VALUE
4326 && ifile->ts_offset == -is->start_time
4327 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4328 int64_t new_start_time = INT64_MAX;
4329 for (i=0; i<is->nb_streams; i++) {
4330 AVStream *st = is->streams[i];
4331 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4333 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4335 if (new_start_time > is->start_time) {
4336 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4337 ifile->ts_offset = -new_start_time;
4341 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4342 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4343 ist->wrap_correction_done = 1;
/* If a timestamp lands in the upper half of the wrap range, unwrap it and
 * keep correction pending for subsequent packets. */
4345 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4346 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4347 ist->wrap_correction_done = 0;
4349 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4350 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4351 ist->wrap_correction_done = 0;
4355 /* add the stream-global side data to the first packet */
4356 if (ist->nb_packets == 1) {
4357 for (i = 0; i < ist->st->nb_side_data; i++) {
4358 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate elsewhere; skip here. */
4361 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4364 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4367 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4371 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the user -itsscale factor. */
4375 if (pkt.dts != AV_NOPTS_VALUE)
4376 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4377 if (pkt.pts != AV_NOPTS_VALUE)
4378 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4380 if (pkt.pts != AV_NOPTS_VALUE)
4381 pkt.pts *= ist->ts_scale;
4382 if (pkt.dts != AV_NOPTS_VALUE)
4383 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen timestamp
 * (only for discontinuous formats like MPEG-TS, and not under -copyts). */
4385 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4386 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4387 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4388 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4389 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4390 int64_t delta = pkt_dts - ifile->last_ts;
4391 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4392 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4393 ifile->ts_offset -= delta;
4394 av_log(NULL, AV_LOG_DEBUG,
4395 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4396 delta, ifile->ts_offset);
4397 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4398 if (pkt.pts != AV_NOPTS_VALUE)
4399 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Shift timestamps by the accumulated loop duration (-stream_loop). */
4403 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4404 if (pkt.pts != AV_NOPTS_VALUE) {
4405 pkt.pts += duration;
4406 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4407 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4410 if (pkt.dts != AV_NOPTS_VALUE)
4411 pkt.dts += duration;
/* Per-stream discontinuity check against this stream's predicted next DTS. */
4413 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4414 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4415 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4416 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4418 int64_t delta = pkt_dts - ist->next_dts;
4419 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4420 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4421 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4422 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4423 ifile->ts_offset -= delta;
4424 av_log(NULL, AV_LOG_DEBUG,
4425 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4426 delta, ifile->ts_offset);
4427 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4428 if (pkt.pts != AV_NOPTS_VALUE)
4429 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-discontinuous formats: timestamps beyond dts_error_threshold are
 * considered bogus and dropped rather than offset-corrected. */
4432 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4433 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4434 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4435 pkt.dts = AV_NOPTS_VALUE;
4437 if (pkt.pts != AV_NOPTS_VALUE){
4438 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4439 delta = pkt_pts - ist->next_dts;
4440 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4441 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4442 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4443 pkt.pts = AV_NOPTS_VALUE;
4449 if (pkt.dts != AV_NOPTS_VALUE)
4450 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4453 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4454 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4455 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4456 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4457 av_ts2str(input_files[ist->file_index]->ts_offset),
4458 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4461 sub2video_heartbeat(ist, pkt.pts);
4463 process_input_packet(ist, &pkt, 0);
4466 av_packet_unref(&pkt);
4472 * Perform a step of transcoding for the specified filter graph.
4474 * @param[in] graph filter graph to consider
4475 * @param[out] best_ist input stream where a frame would allow to continue
4476 * @return 0 for success, <0 for error
/* Run one step of a filtergraph: request a frame from its oldest sink.
 * On success, reap filtered frames; on EOF, reap with flush and close all of
 * the graph's output streams; on EAGAIN, pick the input filter with the most
 * failed buffersrc requests and report its input stream via *best_ist so the
 * caller knows which input to feed next. If no input can be fed (all at
 * EOF/eagain), the graph's outputs are marked unavailable.
 * NOTE(review): elided listing — *best_ist assignments and returns are
 * missing from this view. */
4478 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4481 int nb_requests, nb_requests_max = 0;
4482 InputFilter *ifilter;
4486 ret = avfilter_graph_request_oldest(graph->graph);
4488 return reap_filters(0);
4490 if (ret == AVERROR_EOF) {
4491 ret = reap_filters(1);
4492 for (i = 0; i < graph->nb_outputs; i++)
4493 close_output_stream(graph->outputs[i]->ost);
4496 if (ret != AVERROR(EAGAIN))
4499 for (i = 0; i < graph->nb_inputs; i++) {
4500 ifilter = graph->inputs[i];
/* Skip inputs whose file is currently starved or fully consumed. */
4502 if (input_files[ist->file_index]->eagain ||
4503 input_files[ist->file_index]->eof_reached)
4505 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4506 if (nb_requests > nb_requests_max) {
4507 nb_requests_max = nb_requests;
4513 for (i = 0; i < graph->nb_outputs; i++)
4514 graph->outputs[i]->ost->unavailable = 1;
4520 * Run a single step of transcoding.
4522 * @return 0 for success, <0 for error
/* Run a single step of transcoding: choose the most-lagging output stream,
 * (re)configure / lazily initialize its filtergraph and encoder if needed,
 * decide which input stream to read from, process one input packet, and reap
 * any filtered frames. Returns 0 on success, <0 on error.
 * NOTE(review): elided listing — several returns/continues and the
 * `if (!ost)` / `if (!ist)` guards are missing from this view. */
4524 static int transcode_step(void)
4527 InputStream *ist = NULL;
4530 ost = choose_output();
4537 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtergraph exists but is not configured yet: configure it once all of
 * its inputs know their format. */
4541 if (ost->filter && !ost->filter->graph->graph) {
4542 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4543 ret = configure_filtergraph(ost->filter->graph);
4545 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4551 if (ost->filter && ost->filter->graph->graph) {
/* Encoder init was deferred until the first filtered frame fixed the
 * output parameters; do it now. */
4552 if (!ost->initialized) {
4553 char error[1024] = {0};
4554 ret = init_output_stream(ost, error, sizeof(error));
4556 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4557 ost->file_index, ost->index, error);
4561 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4565 } else if (ost->filter) {
/* Unconfigured graph: feed any input that has produced no output yet. */
4567 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4568 InputFilter *ifilter = ost->filter->graph->inputs[i];
4569 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* All graph inputs exhausted: nothing more will arrive for this ost. */
4575 ost->inputs_done = 1;
4579 av_assert0(ost->source_index >= 0);
4580 ist = input_streams[ost->source_index];
4583 ret = process_input(ist->file_index);
4584 if (ret == AVERROR(EAGAIN)) {
4585 if (input_files[ist->file_index]->eagain)
4586 ost->unavailable = 1;
4591 return ret == AVERROR_EOF ? 0 : ret;
4593 return reap_filters(0);
4597 * The following code is the main loop of the file converter
/* Main conversion driver: initialize, loop transcode_step() until no output
 * is needed or a SIGTERM/quit arrives, then flush decoders, write trailers,
 * close encoders/decoders and release per-stream resources.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): elided listing — goto fail paths, #if HAVE_PTHREADS guards
 * and several braces are missing from this view. */
4599 static int transcode(void)
4602 AVFormatContext *os;
4605 int64_t timer_start;
4606 int64_t total_packets_written = 0;
4608 ret = transcode_init();
4612 if (stdin_interaction) {
4613 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n")
4616 timer_start = av_gettime_relative();
4619 if ((ret = init_input_threads()) < 0)
4623 while (!received_sigterm) {
4624 int64_t cur_time= av_gettime_relative();
4626 /* if 'q' pressed, exits */
4627 if (stdin_interaction)
4628 if (check_keyboard_interaction(cur_time) < 0)
4631 /* check if there's any stream where output is still needed */
4632 if (!need_output()) {
4633 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4637 ret = transcode_step();
4638 if (ret < 0 && ret != AVERROR_EOF) {
4640 av_strerror(ret, errbuf, sizeof(errbuf));
4642 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4646 /* dump report by using the output first video and audio streams */
4647 print_report(0, timer_start, cur_time);
4650 free_input_threads();
4653 /* at the end of stream, we must flush the decoder buffers */
4654 for (i = 0; i < nb_input_streams; i++) {
4655 ist = input_streams[i];
4656 if (!input_files[ist->file_index]->eof_reached) {
4657 process_input_packet(ist, NULL, 0);
4664 /* write the trailer if needed and close file */
4665 for (i = 0; i < nb_output_files; i++) {
4666 os = output_files[i]->ctx;
4667 if (!output_files[i]->header_written) {
4668 av_log(NULL, AV_LOG_ERROR,
4669 "Nothing was written into output file %d (%s), because "
4670 "at least one of its streams received no packets.\n",
4674 if ((ret = av_write_trailer(os)) < 0) {
4675 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4681 /* dump report by using the first video and audio streams */
4682 print_report(1, timer_start, av_gettime_relative());
4684 /* close each encoder */
4685 for (i = 0; i < nb_output_streams; i++) {
4686 ost = output_streams[i];
4687 if (ost->encoding_needed) {
4688 av_freep(&ost->enc_ctx->stats_in);
4690 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was muxed at all. */
4693 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4694 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4698 /* close each decoder */
4699 for (i = 0; i < nb_input_streams; i++) {
4700 ist = input_streams[i];
4701 if (ist->decoding_needed) {
4702 avcodec_close(ist->dec_ctx);
4703 if (ist->hwaccel_uninit)
4704 ist->hwaccel_uninit(ist->dec_ctx);
4708 av_buffer_unref(&hw_device_ctx);
4709 hw_device_free_all();
4716 free_input_threads();
/* Per-output-stream cleanup: close pass logfiles and free option dicts. */
4719 if (output_streams) {
4720 for (i = 0; i < nb_output_streams; i++) {
4721 ost = output_streams[i];
4724 if (fclose(ost->logfile))
4725 av_log(NULL, AV_LOG_ERROR,
4726 "Error closing logfile, loss of information possible: %s\n",
4727 av_err2str(AVERROR(errno)));
4728 ost->logfile = NULL;
4730 av_freep(&ost->forced_kf_pts);
4731 av_freep(&ost->apad);
4732 av_freep(&ost->disposition);
4733 av_dict_free(&ost->encoder_opts);
4734 av_dict_free(&ost->sws_dict);
4735 av_dict_free(&ost->swr_opts);
4736 av_dict_free(&ost->resample_opts);
/**
 * Return the user CPU time consumed by this process, in microseconds.
 * Used for the -benchmark report. Falls back to wall-clock time when no
 * per-process CPU clock is available on the platform.
 *
 * NOTE(review): the visible excerpt had lost the preprocessor guards and
 * braces; they are restored here — the leading #if is implied by the
 * surviving "#elif HAVE_GETPROCESSTIMES" line.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    /* POSIX: ru_utime is a timeval (seconds + microseconds). */
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    /* Windows: FILETIME counts 100 ns units; divide by 10 for microseconds. */
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    /* No CPU-time API available: wall clock is the best approximation. */
    return av_gettime_relative();
#endif
}
4762 static int64_t getmaxrss(void)
4764 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4765 struct rusage rusage;
4766 getrusage(RUSAGE_SELF, &rusage);
4767 return (int64_t)rusage.ru_maxrss * 1024;
4768 #elif HAVE_GETPROCESSMEMORYINFO
4770 PROCESS_MEMORY_COUNTERS memcounters;
4771 proc = GetCurrentProcess();
4772 memcounters.cb = sizeof(memcounters);
4773 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4774 return memcounters.PeakPagefileUsage;
/**
 * av_log callback that intentionally discards every message.
 * Installed when the "-d" option is given so that nothing is
 * written to stderr by the logging system.
 * (NOTE(review): the empty body braces were missing from the excerpt
 * and are restored here.)
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4784 int main(int argc, char **argv)
/* Program entry point: configure logging, register all components, parse
 * the command line, run the transcode, then exit via exit_program().
 * NOTE(review): this excerpt is missing lines (opening brace, declarations
 * of i/ret/ti, the exit_program() calls inside the error branches, the
 * closing brace). Comments annotate the visible code only. */
/* Ensure the cleanup handler runs on every exit_program() path. */
4791 register_exit(ffmpeg_cleanup);
4793 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
/* Collapse repeated log lines; honor -loglevel before anything logs. */
4795 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4796 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: silence all logging via the null callback
 * (presumably daemon mode — the rest of the branch is not visible here). */
4798 if(argc>1 && !strcmp(argv[1], "-d")){
4800 av_log_set_callback(log_callback_null);
/* Register codecs, devices and filters, then initialize networking. */
4805 avcodec_register_all();
4807 avdevice_register_all();
4809 avfilter_register_all();
4811 avformat_network_init();
4813 show_banner(argc, argv, options);
4815 /* parse options and open all input/output files */
4816 ret = ffmpeg_parse_options(argc, argv);
/* Nothing to do at all: print a usage hint (warning-level) and bail. */
4820 if (nb_output_files <= 0 && nb_input_files == 0) {
4822 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4826 /* file converter / grab */
4827 if (nb_output_files <= 0) {
4828 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Input-less runs are deliberately allowed, hence this disabled check. */
4832 // if (nb_input_files == 0) {
4833 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Scan outputs for non-RTP muxers (the loop body setting the flag is not
 * visible in this excerpt). */
4837 for (i = 0; i < nb_output_files; i++) {
4838 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the whole transcode with user CPU time, in microseconds. */
4842 current_time = ti = getutime();
4843 if (transcode() < 0)
4845 ti = getutime() - ti;
4847 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4849 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4850 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run when the decode error ratio exceeds -max_error_rate
 * (the consequent exit_program() line is not visible in this excerpt). */
4851 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit status 255 when a termination signal was received; otherwise the
 * accumulated main_return_code. exit_program() does not return. */
4854 exit_program(received_nb_signals ? 255 : main_return_code);
4855 return main_return_code;