2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily. */
115 static FILE *vstats_file;
/* Constant names available in -force_key_frames expressions.
 * NOTE(review): initializer list elided in this extract. */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state / counters shared across the transcoding loop. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
/* Threshold for the "More than N frames duplicated" warning. */
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (see do_subtitle_out). */
142 static uint8_t *subtitle_out;
/* Global tables of all input/output streams and files, indexed by the
 * *_index fields stored in the per-stream structures. */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* (Re)allocate the reusable sub2video canvas frame and clear it to fully
 * transparent black. Canvas size is taken from the decoder when known,
 * otherwise from the configured sub2video w/h; pixel format is RGB32 so
 * the alpha channel can carry subtitle transparency.
 * NOTE(review): fragment — some original lines (locals, returns) are not
 * visible in this extract. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before requesting a fresh one. */
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero the first plane: RGB32 with all-zero bytes = transparent. */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas, expanding the
 * rectangle's palette-indexed pixels through its palette (r->data[1]).
 * Rectangles that are not bitmaps or that fall outside the w x h canvas
 * are rejected with a warning (the early-return lines are elided here). */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the rect must lie entirely within the canvas. */
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner (4 bytes per RGB32 pixel). */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* Palette lookup per source pixel. */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Stamp the sub2video canvas with the given pts and push a reference to it
 * into every filtergraph input fed by this stream. KEEP_REF keeps our copy
 * alive for re-sends; PUSH asks the filtergraph to process immediately. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle (or a blank canvas when sub == NULL) onto the
 * sub2video frame and push it into the filtergraphs. pts/end_pts are the
 * subtitle display window rescaled from AV_TIME_BASE to the stream's
 * time base; a NULL sub reuses the previous end_pts as the new pts
 * (the NULL branch is partly elided in this extract). */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (microseconds). */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
/* Without a blank canvas there is nothing safe to draw on. */
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams of the same input file "alive": whenever a frame
 * is read for any stream, re-send the current (or a blank) subtitle canvas
 * to sibling sub2video streams whose filters are starving, so an overlay
 * filter never blocks waiting for subtitle input. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired or never drawn: refresh with a blank canvas. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* Only push when some downstream filter actually failed a request. */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: push one final (blank)
 * update if the last subtitle has not run to INT64_MAX, then send NULL
 * frames to signal EOF to every connected buffer source. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put the tty attributes saved at
 * startup back (guard lines around restore_tty are elided here). The
 * empty QUIET log call flushes av_log state without printing. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handler and the main
 * loop. transcode_init_done is atomic because it is read from the
 * interrupt callback while the main thread writes it. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: records which signal arrived and how many times. After
 * more than three signals the user clearly wants out, so hard-exit with a
 * raw write(2) (async-signal-safe, unlike av_log/printf). */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the POSIX-style
 * sigterm_handler so the rest of the shutdown logic is shared.
 * NOTE(review): the CTRL_C_EVENT case and return statements are elided in
 * this extract. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait until the main thread flags completion. */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* Poll stdin for a single keypress. On POSIX this uses select() with a
 * zero timeout; on Windows it distinguishes a real console from a pipe
 * via GetConsoleMode/PeekNamedPipe. NOTE(review): the actual read and the
 * return paths are elided in this extract. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so failure means "stdin is a pipe". */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * Before init completes (transcode_init_done == 0) a single signal
 * aborts; after init it takes more than one, allowing graceful shutdown. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
/* Shared interrupt callback handed to all AVFormat contexts. */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down every global structure on exit: filtergraphs (draining queued
 * frames/subtitles), output files and streams, input threads/files/streams,
 * the vstats file, and the network layer. `ret` is the exit status used
 * for the final "Conversion failed!" diagnostic.
 * NOTE(review): fragment — locals, closing braces and several statements
 * (e.g. avio_closep, exit) are elided in this extract. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graphs, drain per-input frame/sub queues --- */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* --- output files: close muxer I/O and free contexts --- */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* --- output streams: free bsf chain, frames, dicts, codec contexts --- */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
/* Drain any packets still buffered for a never-initialized muxer. */
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
/* --- inputs: stop reader threads, close demuxers, free streams --- */
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost stats data. */
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (exact-case
 * match); used to strip consumed options before the leftover check. */
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed — catches
 * user typos in -option names (the exit call is elided here). */
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-exit helper for experimental codecs used without -strict -2.
 * NOTE(review): body elided in this extract. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is set, log the CPU time elapsed since the previous
 * call under the printf-style label fmt; a NULL fmt just resets the timer
 * (that branch is elided in this extract). */
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags go to ost itself,
 * `others` to all remaining streams (used after a fatal muxer error). */
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer: enforce -frames limits, buffer packets
 * while the muxer header is not yet written, fix up timestamps (DTS/PTS
 * sanity, monotonicity), collect stats and finally interleave-write.
 * `unqueue` is nonzero when the packet is being replayed from the muxing
 * queue, so it must not be counted a second time.
 * NOTE(review): fragment — returns, closing braces and some error paths
 * are elided in this extract. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically up to max_muxing_queue_size. */
696 if (!av_fifo_space(ost->muxing_queue)) {
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
/* -vsync drop / negative async: let the muxer regenerate timestamps. */
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
/* Pull encoder quality/PSNR stats from packet side data for -stats. */
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force the duration implied by the frame rate. */
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* DTS must not exceed PTS; replace by a guess centered on the median. */
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
/* sum - min - max == the median of the three candidates. */
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream. */
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonous DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
774 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* On write failure, finish all streams so the job shuts down cleanly. */
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and, if appropriate, clamp
 * the file's recording_time so other streams stop at the same point
 * (the surrounding condition is elided in this extract). */
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the stream's bitstream-filter chain (if any) and
 * pass each produced packet to write_packet(); without filters the packet
 * goes straight to the muxer. `idx` walks the bsf chain; EAGAIN moves back
 * up the chain, EOF/errors end it (some transitions are elided here). */
817 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
821 /* apply the output bitstream filters, if any */
822 if (ost->nb_bitstream_filters) {
825 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
831 /* get a packet from the previous filter up the chain */
832 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
833 if (ret == AVERROR(EAGAIN)) {
840 /* send it to the next filter down the chain or to the muxer */
841 if (idx < ost->nb_bitstream_filters) {
842 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
847 write_packet(of, pkt, ost, 0);
/* No bitstream filters: write directly. */
850 write_packet(of, pkt, ost, 0);
853 if (ret < 0 && ret != AVERROR_EOF) {
854 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
855 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether encoding may continue for this stream: when the stream's
 * current position has reached the file's -t recording time, close the
 * stream (the return statements are elided in this extract). */
861 static int check_recording_time(OutputStream *ost)
863 OutputFile *of = output_files[ost->file_index];
865 if (of->recording_time != INT64_MAX &&
866 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
867 AV_TIME_BASE_Q) >= 0) {
868 close_output_stream(ost);
/* Encode one audio frame: fix up its pts against sync_opts, send it to the
 * encoder, then drain all available packets to output_packet(). Uses the
 * send/receive API so one frame may yield zero or more packets.
 * NOTE(review): fragment — the receive loop structure and error gotos are
 * partly elided in this extract. */
874 static void do_audio_out(OutputFile *of, OutputStream *ost,
877 AVCodecContext *enc = ost->enc_ctx;
881 av_init_packet(&pkt);
/* Respect -t: stop feeding the encoder once recording time is reached. */
885 if (!check_recording_time(ost))
888 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
889 frame->pts = ost->sync_opts;
/* Next expected pts = this frame's pts plus its sample count. */
890 ost->sync_opts = frame->pts + frame->nb_samples;
891 ost->samples_encoded += frame->nb_samples;
892 ost->frames_encoded++;
894 av_assert0(pkt.size || !pkt.data);
895 update_benchmark(NULL);
897 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
898 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
899 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
900 enc->time_base.num, enc->time_base.den);
903 ret = avcodec_send_frame(enc, frame);
908 ret = avcodec_receive_packet(enc, &pkt);
909 if (ret == AVERROR(EAGAIN))
914 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder timestamps -> muxer timebase before handing off. */
916 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
919 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
920 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
921 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
922 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
925 output_packet(of, &pkt, ost);
930 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle into packet(s). DVB subtitles are encoded twice
 * (one packet to draw, one to clear), hence the nb-iteration loop; each
 * pass normalizes start_display_time to 0 as required by encoders and
 * builds the output packet timestamps in mux_timebase.
 * NOTE(review): fragment — parameter list, some locals and exits are
 * elided in this extract. */
934 static void do_subtitle_out(OutputFile *of,
938 int subtitle_out_max_size = 1024 * 1024;
939 int subtitle_out_size, nb, i;
/* Subtitles without a pts cannot be placed on the timeline. */
944 if (sub->pts == AV_NOPTS_VALUE) {
945 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
954 subtitle_out = av_malloc(subtitle_out_max_size);
956 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
961 /* Note: DVB subtitle need one packet to draw them and one other
962 packet to clear them */
963 /* XXX: signal it in the codec context ? */
964 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
969 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
971 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
972 pts -= output_files[ost->file_index]->start_time;
973 for (i = 0; i < nb; i++) {
974 unsigned save_num_rects = sub->num_rects;
976 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
977 if (!check_recording_time(ost))
981 // start_display_time is required to be 0
982 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
983 sub->end_display_time -= sub->start_display_time;
984 sub->start_display_time = 0;
988 ost->frames_encoded++;
990 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
991 subtitle_out_max_size, sub);
/* Restore num_rects in case the encoder pass modified it. */
993 sub->num_rects = save_num_rects;
994 if (subtitle_out_size < 0) {
995 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
999 av_init_packet(&pkt);
1000 pkt.data = subtitle_out;
1001 pkt.size = subtitle_out_size;
1002 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1003 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1004 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1005 /* XXX: the pts correction is handled here. Maybe handling
1006 it in the codec would be better */
/* First DVB packet draws at start time, second clears at end time. */
1008 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1010 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1013 output_packet(of, &pkt, ost);
/* Encode one video frame with frame-rate conversion. Computes how many
 * times to emit the current/previous frame (nb_frames / nb0_frames) from
 * the drift between input and output timelines under the active -vsync
 * mode, handles forced keyframes, then sends frames to the encoder and
 * drains packets to output_packet(). next_picture == NULL means EOF, in
 * which case the recent duplication pattern is replayed via mid_pred().
 * NOTE(review): fragment — many lines (declarations, braces, returns)
 * are elided in this extract; comments below are per visible statement. */
1017 static void do_video_out(OutputFile *of,
1019 AVFrame *next_picture,
1022 int ret, format_video_sync;
1024 AVCodecContext *enc = ost->enc_ctx;
1025 AVCodecParameters *mux_par = ost->st->codecpar;
1026 AVRational frame_rate;
1027 int nb_frames, nb0_frames, i;
1028 double delta, delta0;
1029 double duration = 0;
1031 InputStream *ist = NULL;
1032 AVFilterContext *filter = ost->filter->filter;
1034 if (ost->source_index >= 0)
1035 ist = input_streams[ost->source_index];
/* Frame duration (in encoder ticks) from the filtergraph frame rate. */
1037 frame_rate = av_buffersink_get_frame_rate(filter);
1038 if (frame_rate.num > 0 && frame_rate.den > 0)
1039 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1041 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1042 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the actual packet duration when no filter script interferes. */
1044 if (!ost->filters_script &&
1048 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1049 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF: repeat the median of the last three duplication counts. */
1052 if (!next_picture) {
1054 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1055 ost->last_nb0_frames[1],
1056 ost->last_nb0_frames[2]);
1058 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1059 delta = delta0 + duration;
1061 /* by default, we output a single frame */
1062 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve -vsync auto into a concrete sync mode for this muxer. */
1065 format_video_sync = video_sync_method;
1066 if (format_video_sync == VSYNC_AUTO) {
1067 if(!strcmp(of->ctx->oformat->name, "avi")) {
1068 format_video_sync = VSYNC_VFR;
1070 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1072 && format_video_sync == VSYNC_CFR
1073 && input_files[ist->file_index]->ctx->nb_streams == 1
1074 && input_files[ist->file_index]->input_ts_offset == 0) {
1075 format_video_sync = VSYNC_VSCFR;
1077 if (format_video_sync == VSYNC_CFR && copy_ts) {
1078 format_video_sync = VSYNC_VSCFR;
1081 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clamp frames that would land in the past (drift beyond tolerance). */
1085 format_video_sync != VSYNC_PASSTHROUGH &&
1086 format_video_sync != VSYNC_DROP) {
1087 if (delta0 < -0.6) {
1088 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1090 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1091 sync_ipts = ost->sync_opts;
/* Per-mode dup/drop decision (case labels partly elided). */
1096 switch (format_video_sync) {
1098 if (ost->frame_number == 0 && delta0 >= 0.5) {
1099 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1102 ost->sync_opts = lrint(sync_ipts);
1105 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1106 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1108 } else if (delta < -1.1)
1110 else if (delta > 1.1) {
1111 nb_frames = lrintf(delta);
1113 nb0_frames = lrintf(delta0 - 0.6);
1119 else if (delta > 0.6)
1120 ost->sync_opts = lrint(sync_ipts);
1123 case VSYNC_PASSTHROUGH:
1124 ost->sync_opts = lrint(sync_ipts);
/* Never exceed -frames; record the dup count for the EOF predictor. */
1131 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1132 nb0_frames = FFMIN(nb0_frames, nb_frames);
1134 memmove(ost->last_nb0_frames + 1,
1135 ost->last_nb0_frames,
1136 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1137 ost->last_nb0_frames[0] = nb0_frames;
1139 if (nb0_frames == 0 && ost->last_dropped) {
1141 av_log(NULL, AV_LOG_VERBOSE,
1142 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1143 ost->frame_number, ost->st->index, ost->last_frame->pts);
1145 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1146 if (nb_frames > dts_error_threshold * 30) {
1147 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1151 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1152 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1153 if (nb_frames_dup > dup_warning) {
1154 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1158 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1160 /* duplicates frame if needed */
1161 for (i = 0; i < nb_frames; i++) {
1162 AVFrame *in_picture;
1163 av_init_packet(&pkt);
/* First nb0_frames iterations re-send the previous frame. */
1167 if (i < nb0_frames && ost->last_frame) {
1168 in_picture = ost->last_frame;
1170 in_picture = next_picture;
1175 in_picture->pts = ost->sync_opts;
1178 if (!check_recording_time(ost))
1180 if (ost->frame_number >= ost->max_frames)
/* Legacy rawvideo path: wrap the AVPicture directly in a packet. */
1184 #if FF_API_LAVF_FMT_RAWPICTURE
1185 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1186 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1187 /* raw pictures are written as AVPicture structure to
1188 avoid any copies. We support temporarily the older
1190 if (in_picture->interlaced_frame)
1191 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1193 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1194 pkt.data = (uint8_t *)in_picture;
1195 pkt.size = sizeof(AVPicture);
1196 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1197 pkt.flags |= AV_PKT_FLAG_KEY;
1199 output_packet(of, &pkt, ost);
1203 int forced_keyframe = 0;
1206 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1207 ost->top_field_first >= 0)
1208 in_picture->top_field_first = !!ost->top_field_first;
1210 if (in_picture->interlaced_frame) {
1211 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1212 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1214 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1216 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1218 in_picture->quality = enc->global_quality;
1219 in_picture->pict_type = 0;
/* Forced keyframes: pts list, expression, or copy from the source. */
1221 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1222 in_picture->pts * av_q2d(enc->time_base) : NAN;
1223 if (ost->forced_kf_index < ost->forced_kf_count &&
1224 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1225 ost->forced_kf_index++;
1226 forced_keyframe = 1;
1227 } else if (ost->forced_keyframes_pexpr) {
1229 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1230 res = av_expr_eval(ost->forced_keyframes_pexpr,
1231 ost->forced_keyframes_expr_const_values, NULL);
1232 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1233 ost->forced_keyframes_expr_const_values[FKF_N],
1234 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1235 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1236 ost->forced_keyframes_expr_const_values[FKF_T],
1237 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1240 forced_keyframe = 1;
1241 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1242 ost->forced_keyframes_expr_const_values[FKF_N];
1243 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1244 ost->forced_keyframes_expr_const_values[FKF_T];
1245 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1248 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1249 } else if ( ost->forced_keyframes
1250 && !strncmp(ost->forced_keyframes, "source", 6)
1251 && in_picture->key_frame==1) {
1252 forced_keyframe = 1;
1255 if (forced_keyframe) {
1256 in_picture->pict_type = AV_PICTURE_TYPE_I;
1257 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1260 update_benchmark(NULL);
1262 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1263 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1264 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1265 enc->time_base.num, enc->time_base.den);
1268 ost->frames_encoded++;
/* send/receive encode loop; EAGAIN means "feed the next frame". */
1270 ret = avcodec_send_frame(enc, in_picture);
1275 ret = avcodec_receive_packet(enc, &pkt);
1276 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1277 if (ret == AVERROR(EAGAIN))
1283 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1284 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1285 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1286 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1289 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1290 pkt.pts = ost->sync_opts;
1292 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1295 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1296 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1298 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1301 frame_size = pkt.size;
1302 output_packet(of, &pkt, ost);
1304 /* if two pass, output log */
1305 if (ost->logfile && enc->stats_out) {
1306 fprintf(ost->logfile, "%s", enc->stats_out);
1312 * For video, number of frames in == number of packets out.
1313 * But there may be reordering, so we can't throw away frames on encoder
1314 * flush, we need to limit them here, before they go into encoder.
1316 ost->frame_number++;
1318 if (vstats_filename && frame_size)
1319 do_video_stats(ost, frame_size);
/* Remember this frame so it can be re-sent for duplication next call. */
1322 if (!ost->last_frame)
1323 ost->last_frame = av_frame_alloc();
1324 av_frame_unref(ost->last_frame);
1325 if (next_picture && ost->last_frame)
1326 av_frame_ref(ost->last_frame, next_picture);
1328 av_frame_free(&ost->last_frame);
1332 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1336 static double psnr(double d)
1338 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for this video stream to the
 * global vstats file (enabled via -vstats / -vstats_file).
 * frame_size is the size in bytes of the encoded packet for this frame.
 * NOTE(review): this listing elides several lines (fopen error handling,
 * the enc assignment, and the vstats_version >= 2 branch header).
 */
1341 static void do_video_stats(OutputStream *ost, int frame_size)
1343 AVCodecContext *enc;
1345 double ti1, bitrate, avg_bitrate;
/* Lazily open the stats file on first call. */
1347 /* this is executed just the first time do_video_stats is called */
1349 vstats_file = fopen(vstats_filename, "w");
1357 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1358 frame_number = ost->st->nb_frames;
/* Version 1 format omits the output-file/stream prefix. */
1359 if (vstats_version <= 1) {
1360 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1361 ost->quality / (float)FF_QP2LAMBDA);
1363 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1364 ost->quality / (float)FF_QP2LAMBDA);
/* Per-frame PSNR only when the encoder collected error stats (FLAG_PSNR). */
1367 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1368 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1370 fprintf(vstats_file,"f_size= %6d ", frame_size);
1371 /* compute pts value */
1372 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time-base tick. */
1376 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1377 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1378 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1379 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1380 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Forward declaration: defined later in the file; called from reap_filters()
 * and flush_encoders() below for lazy stream initialization. */
1384 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream as completely done: nothing further will be
 * encoded or muxed for it.
 */
1386 static void finish_output_stream(OutputStream *ost)
1388 OutputFile *of = output_files[ost->file_index];
1391 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* NOTE(review): upstream this loop is guarded by if (of->shortest); the
 * guard line is elided from this listing — confirm before assuming all
 * sibling streams of the file are finished unconditionally. */
1394 for (i = 0; i < of->ctx->nb_streams; i++)
1395 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Pull every frame currently buffered in the filtergraph sinks and hand it
 * to the per-media-type encode path (do_video_out / do_audio_out).
 * @param flush when nonzero, an EOF from a video sink still triggers one
 *              final do_video_out() call with a NULL frame to drain the encoder.
 */
1400 * Get and encode new output from any of the filtergraphs, without causing
1403 * @return 0 for success, <0 for severe errors
1405 static int reap_filters(int flush)
1407 AVFrame *filtered_frame = NULL;
1410 /* Reap all buffers present in the buffer sinks */
1411 for (i = 0; i < nb_output_streams; i++) {
1412 OutputStream *ost = output_streams[i];
1413 OutputFile *of = output_files[ost->file_index];
1414 AVFilterContext *filter;
1415 AVCodecContext *enc = ost->enc_ctx;
/* Streams without a configured filtergraph have nothing to reap. */
1418 if (!ost->filter || !ost->filter->graph->graph)
1420 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time data is available. */
1422 if (!ost->initialized) {
1423 char error[1024] = "";
1424 ret = init_output_stream(ost, error, sizeof(error));
1426 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1427 ost->file_index, ost->index, error);
/* Reusable frame owned by the OutputStream; unref'd after each use. */
1432 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1433 return AVERROR(ENOMEM);
1435 filtered_frame = ost->filtered_frame;
1438 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1439 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1440 AV_BUFFERSINK_FLAG_NO_REQUEST);
1442 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1443 av_log(NULL, AV_LOG_WARNING,
1444 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1445 } else if (flush && ret == AVERROR_EOF) {
1446 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1447 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
/* Drop frames for streams that were already finished elsewhere. */
1451 if (ost->finished) {
1452 av_frame_unref(filtered_frame);
1455 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1456 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1457 AVRational filter_tb = av_buffersink_get_time_base(filter);
1458 AVRational tb = enc->time_base;
/* Temporarily widen the encoder time base by up to 16 extra fractional
 * bits so float_pts keeps sub-tick precision after the rescale. */
1459 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1461 tb.den <<= extra_bits;
1463 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1464 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1465 float_pts /= 1 << extra_bits;
1466 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1467 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts in the encoder time base, shifted by the output start time. */
1469 filtered_frame->pts =
1470 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1471 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1473 //if (ost->source_index >= 0)
1474 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* Dispatch to the media-type specific encode path. */
1476 switch (av_buffersink_get_type(filter)) {
1477 case AVMEDIA_TYPE_VIDEO:
1478 if (!ost->frame_aspect_ratio.num)
1479 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1482 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1483 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1485 enc->time_base.num, enc->time_base.den);
1488 do_video_out(of, ost, filtered_frame, float_pts);
1490 case AVMEDIA_TYPE_AUDIO:
/* A filtergraph may legally change audio parameters mid-stream only if
 * the encoder advertises AV_CODEC_CAP_PARAM_CHANGE. */
1491 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1492 enc->channels != filtered_frame->channels) {
1493 av_log(NULL, AV_LOG_ERROR,
1494 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1497 do_audio_out(of, ost, filtered_frame);
1500 // TODO support subtitle filters
1504 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-input/per-output
 * stream packet and frame counts.
 * @param total_size total size in bytes of the first output file,
 *                   or a negative value if unknown.
 */
1511 static void print_final_stats(int64_t total_size)
1513 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1514 uint64_t subtitle_size = 0;
1515 uint64_t data_size = 0;
/* -1 means "overhead unknown" and prints as "unknown" below. */
1516 float percent = -1.0;
/* Accumulate encoded payload and extradata sizes across output streams. */
1520 for (i = 0; i < nb_output_streams; i++) {
1521 OutputStream *ost = output_streams[i];
1522 switch (ost->enc_ctx->codec_type) {
1523 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1524 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1525 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1526 default: other_size += ost->data_size; break;
1528 extra_size += ost->enc_ctx->extradata_size;
1529 data_size += ost->data_size;
/* Pass-1-only streams write a log file, not media data; skip them when
 * deciding whether the overhead percentage is meaningful. */
1530 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1531 != AV_CODEC_FLAG_PASS1)
1535 if (data_size && total_size>0 && total_size >= data_size)
1536 percent = 100.0 * (total_size - data_size) / data_size;
1538 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1539 video_size / 1024.0,
1540 audio_size / 1024.0,
1541 subtitle_size / 1024.0,
1542 other_size / 1024.0,
1543 extra_size / 1024.0);
1545 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1547 av_log(NULL, AV_LOG_INFO, "unknown");
1548 av_log(NULL, AV_LOG_INFO, "\n");
1550 /* print verbose per-stream stats */
1551 for (i = 0; i < nb_input_files; i++) {
1552 InputFile *f = input_files[i];
/* Note: this shadows the total_size parameter for per-file totals. */
1553 uint64_t total_packets = 0, total_size = 0;
1555 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1556 i, f->ctx->filename);
1558 for (j = 0; j < f->nb_streams; j++) {
1559 InputStream *ist = input_streams[f->ist_index + j];
1560 enum AVMediaType type = ist->dec_ctx->codec_type;
1562 total_size += ist->data_size;
1563 total_packets += ist->nb_packets;
1565 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1566 i, j, media_type_string(type));
1567 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1568 ist->nb_packets, ist->data_size);
1570 if (ist->decoding_needed) {
1571 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1572 ist->frames_decoded);
1573 if (type == AVMEDIA_TYPE_AUDIO)
1574 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1575 av_log(NULL, AV_LOG_VERBOSE, "; ");
1578 av_log(NULL, AV_LOG_VERBOSE, "\n");
1581 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1582 total_packets, total_size);
/* Same per-file/per-stream breakdown, now for the output side. */
1585 for (i = 0; i < nb_output_files; i++) {
1586 OutputFile *of = output_files[i];
1587 uint64_t total_packets = 0, total_size = 0;
1589 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1590 i, of->ctx->filename);
1592 for (j = 0; j < of->ctx->nb_streams; j++) {
1593 OutputStream *ost = output_streams[of->ost_index + j];
1594 enum AVMediaType type = ost->enc_ctx->codec_type;
1596 total_size += ost->data_size;
1597 total_packets += ost->packets_written;
1599 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1600 i, j, media_type_string(type));
1601 if (ost->encoding_needed) {
1602 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1603 ost->frames_encoded);
1604 if (type == AVMEDIA_TYPE_AUDIO)
1605 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1606 av_log(NULL, AV_LOG_VERBOSE, "; ");
1609 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1610 ost->packets_written, ost->data_size);
1612 av_log(NULL, AV_LOG_VERBOSE, "\n");
1615 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1616 total_packets, total_size);
/* Warn loudly when literally nothing was encoded into any output. */
1618 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1619 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1621 av_log(NULL, AV_LOG_WARNING, "\n");
1623 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...").
 * Two representations are built in parallel:
 *   - buf:        the human-readable line written to stderr / the log;
 *   - buf_script: "key=value" lines streamed to -progress (progress_avio).
 * Rate-limited to roughly one update per 500 ms unless is_last_report.
 * On the last report it also triggers print_final_stats().
 */
1628 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1631 AVBPrint buf_script;
1633 AVFormatContext *oc;
1635 AVCodecContext *enc;
1636 int frame_number, vid, i;
1639 int64_t pts = INT64_MIN + 1;
/* Static state: wall-clock time of the previous report, for throttling. */
1640 static int64_t last_time = -1;
1641 static int qp_histogram[52];
1642 int hours, mins, secs, us;
1646 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to one every 500 ms. */
1649 if (!is_last_report) {
1650 if (last_time == -1) {
1651 last_time = cur_time;
1654 if ((cur_time - last_time) < 500000)
1656 last_time = cur_time;
/* Seconds elapsed since transcoding started. */
1659 t = (cur_time-timer_start) / 1000000.0;
1662 oc = output_files[0]->ctx;
1664 total_size = avio_size(oc->pb);
1665 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1666 total_size = avio_tell(oc->pb);
1670 av_bprint_init(&buf_script, 0, 1);
1671 for (i = 0; i < nb_output_streams; i++) {
1673 ost = output_streams[i];
1675 if (!ost->stream_copy)
1676 q = ost->quality / (float) FF_QP2LAMBDA;
/* Additional video streams after the first only report their q. */
1678 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1679 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1680 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1681 ost->file_index, ost->index, q);
/* First video stream: frame count, fps and q. */
1683 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686 frame_number = ost->frame_number;
1687 fps = t > 1 ? frame_number / t : 0;
1688 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1689 frame_number, fps < 9.95, fps, q);
1690 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1691 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1692 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1693 ost->file_index, ost->index, q);
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: accumulate a QP histogram and print it as one hex digit
 * (log2 of the bucket count) per QP value. */
1699 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1701 for (j = 0; j < 32; j++)
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* Running PSNR per plane (Y/U/V) plus the combined value. */
1705 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1707 double error, error_sum = 0;
1708 double scale, scale_sum = 0;
1710 char type[3] = { 'Y','U','V' };
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1712 for (j = 0; j < 3; j++) {
1713 if (is_last_report) {
1714 error = enc->error[j];
1715 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1717 error = ost->error[j];
1718 scale = enc->width * enc->height * 255.0 * 255.0;
1724 p = psnr(error / scale);
1725 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1726 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1727 ost->file_index, ost->index, type[j] | 32, p);
1729 p = psnr(error_sum / scale_sum);
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1731 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1732 ost->file_index, ost->index, p);
1736 /* compute min output value */
/* Overall reported time = max end pts across all output streams. */
1737 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1738 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1739 ost->st->time_base, AV_TIME_BASE_Q));
1741 nb_frames_drop += ost->last_dropped;
1744 secs = FFABS(pts) / AV_TIME_BASE;
1745 us = FFABS(pts) % AV_TIME_BASE;
/* Negative sentinels mean "N/A" in the output below. */
1751 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1752 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1754 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1756 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1757 "size=%8.0fkB time=", total_size / 1024.0);
1759 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1761 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1762 (100 * us) / AV_TIME_BASE);
1765 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1766 av_bprintf(&buf_script, "bitrate=N/A\n");
1768 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1769 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1772 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1773 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1774 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1775 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1776 hours, mins, secs, us);
1778 if (nb_frames_dup || nb_frames_drop)
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1780 nb_frames_dup, nb_frames_drop);
1781 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1782 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1785 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1786 av_bprintf(&buf_script, "speed=N/A\n");
1788 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1789 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one line; '\n' finalizes the last. */
1792 if (print_stats || is_last_report) {
1793 const char end = is_last_report ? '\n' : '\r';
1794 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1795 fprintf(stderr, "%s %c", buf, end);
1797 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* Stream the machine-readable key=value report to -progress. */
1802 if (progress_avio) {
1803 av_bprintf(&buf_script, "progress=%s\n",
1804 is_last_report ? "end" : "continue");
1805 avio_write(progress_avio, buf_script.str,
1806 FFMIN(buf_script.len, buf_script.size - 1));
1807 avio_flush(progress_avio);
1808 av_bprint_finalize(&buf_script, NULL);
1809 if (is_last_report) {
1810 if ((ret = avio_closep(&progress_avio)) < 0)
1811 av_log(NULL, AV_LOG_ERROR,
1812 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1817 print_final_stats(total_size);
/*
 * Drain every encoder at end of input: send NULL (flush) frames with
 * avcodec_send_frame() and mux all packets still buffered inside the
 * encoders, until each encoder returns AVERROR_EOF.
 */
1820 static void flush_encoders(void)
1824 for (i = 0; i < nb_output_streams; i++) {
1825 OutputStream *ost = output_streams[i];
1826 AVCodecContext *enc = ost->enc_ctx;
1827 OutputFile *of = output_files[ost->file_index];
1829 if (!ost->encoding_needed)
1832 // Try to enable encoding with no input frames.
1833 // Maybe we should just let encoding fail instead.
1834 if (!ost->initialized) {
1835 FilterGraph *fg = ost->filter->graph;
1836 char error[1024] = "";
1838 av_log(NULL, AV_LOG_WARNING,
1839 "Finishing stream %d:%d without any data written to it.\n",
1840 ost->file_index, ost->st->index);
/* The filtergraph never got input: fabricate input parameters from the
 * demuxer's codec parameters so it can still be configured once. */
1842 if (ost->filter && !fg->graph) {
1844 for (x = 0; x < fg->nb_inputs; x++) {
1845 InputFilter *ifilter = fg->inputs[x];
1846 if (ifilter->format < 0) {
1847 AVCodecParameters *par = ifilter->ist->st->codecpar;
1848 // We never got any input. Set a fake format, which will
1849 // come from libavformat.
1850 ifilter->format = par->format;
1851 ifilter->sample_rate = par->sample_rate;
1852 ifilter->channels = par->channels;
1853 ifilter->channel_layout = par->channel_layout;
1854 ifilter->width = par->width;
1855 ifilter->height = par->height;
1856 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1860 if (!ifilter_has_all_input_formats(fg))
1863 ret = configure_filtergraph(fg);
1865 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1869 finish_output_stream(ost);
1872 ret = init_output_stream(ost, error, sizeof(error));
1874 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1875 ost->file_index, ost->index, error);
/* Codecs that cannot buffer (raw/PCM-like) have nothing to flush. */
1880 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1882 #if FF_API_LAVF_FMT_RAWPICTURE
1883 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1887 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1891 const char *desc = NULL;
1895 switch (enc->codec_type) {
1896 case AVMEDIA_TYPE_AUDIO:
1899 case AVMEDIA_TYPE_VIDEO:
1906 av_init_packet(&pkt);
1910 update_benchmark(NULL);
/* Keep sending the NULL drain frame while the encoder still wants input. */
1912 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1913 ret = avcodec_send_frame(enc, NULL);
1915 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1922 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1923 if (ret < 0 && ret != AVERROR_EOF) {
1924 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append the encoder's pass-1 stats to the log file. */
1929 if (ost->logfile && enc->stats_out) {
1930 fprintf(ost->logfile, "%s", enc->stats_out);
1932 if (ret == AVERROR_EOF) {
1935 if (ost->finished & MUXER_FINISHED) {
1936 av_packet_unref(&pkt);
1939 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1940 pkt_size = pkt.size;
1941 output_packet(of, &pkt, ost);
1942 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1943 do_video_stats(ost, pkt_size);
1950 * Check whether a packet from ist should be written into ost at this time
/* NOTE(review): the return statements are elided from this listing; in
 * FFmpeg this returns nonzero (write) only when ost streamcopies from
 * exactly this input stream and the packet is not before -ss. */
1952 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1954 OutputFile *of = output_files[ost->file_index];
/* Absolute index of ist within the global input_streams[] array. */
1955 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1957 if (ost->source_index != ist_index)
/* Drop packets before the requested output start time (-ss). */
1963 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Stream-copy path: forward an input packet to the muxer without
 * re-encoding, after rescaling its timestamps from the input stream
 * time base to the output mux time base and applying start-time /
 * recording-time trimming.
 */
1969 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1971 OutputFile *of = output_files[ost->file_index];
1972 InputFile *f = input_files [ist->file_index];
1973 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1974 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1978 av_init_packet(&opkt);
/* By default drop leading non-keyframes so the output starts decodable. */
1980 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1981 !ost->copy_initial_nonkeyframes)
/* Unless -copypriorss, also drop packets entirely before the start time. */
1984 if (!ost->frame_number && !ost->copy_prior_start) {
1985 int64_t comp_start = start_time;
1986 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1987 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1988 if (pkt->pts == AV_NOPTS_VALUE ?
1989 ist->pts < comp_start :
1990 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output recording time (-t) is reached. */
1994 if (of->recording_time != INT64_MAX &&
1995 ist->pts >= of->recording_time + start_time) {
1996 close_output_stream(ost);
2000 if (f->recording_time != INT64_MAX) {
2001 start_time = f->ctx->start_time;
2002 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2003 start_time += f->start_time;
2004 if (ist->pts >= f->recording_time + start_time) {
2005 close_output_stream(ost);
2010 /* force the input stream PTS */
2011 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts into the mux time base, shifted by the output start. */
2014 if (pkt->pts != AV_NOPTS_VALUE)
2015 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2017 opkt.pts = AV_NOPTS_VALUE;
2019 if (pkt->dts == AV_NOPTS_VALUE)
2020 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2022 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2023 opkt.dts -= ost_tb_start_time;
/* Audio: derive a sample-accurate dts from the packet duration to avoid
 * rounding drift when the stream time base is coarse. */
2025 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2026 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2028 duration = ist->dec_ctx->frame_size;
2029 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2030 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2031 ost->mux_timebase) - ost_tb_start_time;
2034 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2036 opkt.flags = pkt->flags;
2037 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2038 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2039 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2040 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2041 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2043 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2044 &opkt.data, &opkt.size,
2045 pkt->data, pkt->size,
2046 pkt->flags & AV_PKT_FLAG_KEY);
2048 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* av_parser_change may have allocated a new buffer; give opkt ownership. */
2053 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2058 opkt.data = pkt->data;
2059 opkt.size = pkt->size;
2061 av_copy_packet_side_data(&opkt, pkt);
2063 #if FF_API_LAVF_FMT_RAWPICTURE
2064 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2065 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2066 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2067 /* store AVPicture in AVPacket, as expected by the output format */
2068 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2070 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2074 opkt.data = (uint8_t *)&pict;
2075 opkt.size = sizeof(AVPicture);
2076 opkt.flags |= AV_PKT_FLAG_KEY;
2080 output_packet(of, &opkt, ost);
/*
 * If the decoder did not report a channel layout, pick the default layout
 * for the reported channel count (bounded by -guess_layout_max) and warn
 * the user that it was guessed.
 * NOTE(review): the return statements are elided from this listing; in
 * FFmpeg this returns 0 on failure to guess, 1 otherwise.
 */
2083 int guess_input_channel_layout(InputStream *ist)
2085 AVCodecContext *dec = ist->dec_ctx;
2087 if (!dec->channel_layout) {
2088 char layout_name[256];
/* Refuse to guess for very high channel counts (-guess_layout_max). */
2090 if (dec->channels > ist->guess_layout_max)
2092 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2093 if (!dec->channel_layout)
2095 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2096 dec->channels, dec->channel_layout);
2097 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2098 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after a decode attempt: count successes/failures in
 * decode_error_stat[] and, with -xerror, abort on decode errors or on
 * corrupt decoded frames.
 */
2103 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2105 if (*got_output || ret<0)
2106 decode_error_stat[ret<0] ++;
2108 if (ret < 0 && exit_on_error)
2111 if (exit_on_error && *got_output && ist) {
2112 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2113 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2119 // Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): return statements are elided from this listing; in FFmpeg
 * this returns 0 when any audio/video input still has format < 0, else 1. */
2120 static int ifilter_has_all_input_formats(FilterGraph *fg)
2123 for (i = 0; i < fg->nb_inputs; i++) {
2124 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the frame's
 * parameters (format, dimensions, sample rate, layout, hw context)
 * differ from what the graph was configured for, reconfigure the graph
 * first; frames arriving before all inputs know their format are
 * buffered in the input's frame_queue.
 */
2131 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2133 FilterGraph *fg = ifilter->graph;
2134 int need_reinit, ret, i;
2136 /* determine if the parameters for this input changed */
2137 need_reinit = ifilter->format != frame->format;
/* A change of hardware frames context also forces a reinit. */
2138 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2139 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2142 switch (ifilter->ist->st->codecpar->codec_type) {
2143 case AVMEDIA_TYPE_AUDIO:
2144 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2145 ifilter->channels != frame->channels ||
2146 ifilter->channel_layout != frame->channel_layout;
2148 case AVMEDIA_TYPE_VIDEO:
2149 need_reinit |= ifilter->width != frame->width ||
2150 ifilter->height != frame->height;
2155 ret = ifilter_parameters_from_frame(ifilter, frame);
2160 /* (re)init the graph if possible, otherwise buffer the frame and return */
2161 if (need_reinit || !fg->graph) {
2162 for (i = 0; i < fg->nb_inputs; i++) {
/* Not all inputs known yet: clone the frame onto this input's FIFO
 * and return; it will be replayed once the graph is configured. */
2163 if (!ifilter_has_all_input_formats(fg)) {
2164 AVFrame *tmp = av_frame_clone(frame);
2166 return AVERROR(ENOMEM);
2167 av_frame_unref(frame);
2169 if (!av_fifo_space(ifilter->frame_queue)) {
2170 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2172 av_frame_free(&tmp);
2176 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph before tearing it down for reconfiguration. */
2181 ret = reap_filters(1);
2182 if (ret < 0 && ret != AVERROR_EOF) {
2184 av_strerror(ret, errbuf, sizeof(errbuf));
2186 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2190 ret = configure_filtergraph(fg);
2192 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* Finally push the frame into the (possibly fresh) buffer source. */
2197 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2199 if (ret != AVERROR_EOF)
2200 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal end-of-stream on one filtergraph input. If the graph is already
 * configured, push a NULL frame into the buffer source; otherwise, once
 * every input of the never-configured graph has reached EOF, mark all of
 * its output streams finished.
 */
2207 static int ifilter_send_eof(InputFilter *ifilter)
2213 if (ifilter->filter) {
/* NULL frame == EOF for av_buffersrc_add_frame_flags(). */
2214 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2218 // the filtergraph was never configured
2219 FilterGraph *fg = ifilter->graph;
2220 for (i = 0; i < fg->nb_inputs; i++)
2221 if (!fg->inputs[i]->eof)
2223 if (i == fg->nb_inputs) {
2224 // All the input streams have finished without the filtergraph
2225 // ever being configured.
2226 // Mark the output streams as finished.
2227 for (j = 0; j < fg->nb_outputs; j++)
2228 finish_output_stream(fg->outputs[j]->ost);
2235 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2236 // There is the following difference: if you got a frame, you must call
2237 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2238 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin adapter over the send/receive decode API that preserves the old
 * got_frame-style calling convention used throughout this file. */
2239 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
/* Only sends when a packet was supplied (pkt==NULL means "fetch more"). */
2246 ret = avcodec_send_packet(avctx, pkt);
2247 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2248 // decoded frames with avcodec_receive_frame() until done.
2249 if (ret < 0 && ret != AVERROR_EOF)
2253 ret = avcodec_receive_frame(avctx, frame);
2254 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this
 * input stream. All but the last filter receive a reference (via
 * ist->filter_frame); the last one consumes decoded_frame directly.
 */
2262 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2267 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2268 for (i = 0; i < ist->nb_filters; i++) {
2269 if (i < ist->nb_filters - 1) {
2270 f = ist->filter_frame;
2271 ret = av_frame_ref(f, decoded_frame);
2276 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from one filter input is not an error for the caller. */
2277 if (ret == AVERROR_EOF)
2278 ret = 0; /* ignore */
2280 av_log(NULL, AV_LOG_ERROR,
2281 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the resulting frame's timestamp (from
 * stream time base, packet pts, or the interpolated ist->dts, in that
 * order of preference) and push the frame into the filter network.
 * *got_output is set when a frame was produced.
 */
2288 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2291 AVFrame *decoded_frame;
2292 AVCodecContext *avctx = ist->dec_ctx;
2294 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames on first call. */
2296 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2297 return AVERROR(ENOMEM);
2298 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2299 return AVERROR(ENOMEM);
2300 decoded_frame = ist->decoded_frame;
2302 update_benchmark(NULL);
2303 ret = decode(avctx, decoded_frame, got_output, pkt);
2304 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder reporting a non-positive sample rate produced garbage. */
2308 if (ret >= 0 && avctx->sample_rate <= 0) {
2309 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2310 ret = AVERROR_INVALIDDATA;
2313 if (ret != AVERROR_EOF)
2314 check_decode_result(ist, got_output, ret);
2316 if (!*got_output || ret < 0)
2319 ist->samples_decoded += decoded_frame->nb_samples;
2320 ist->frames_decoded++;
2323 /* increment next_dts to use for the case where the input stream does not
2324 have timestamps or there are multiple frames in the packet */
2325 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2327 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Pick the best available timestamp source for the decoded frame. */
2331 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2332 decoded_frame_tb = ist->st->time_base;
2333 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2334 decoded_frame->pts = pkt->pts;
2335 decoded_frame_tb = ist->st->time_base;
2337 decoded_frame->pts = ist->dts;
2338 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale to a 1/sample_rate time base, sample-accurately. */
2340 if (decoded_frame->pts != AV_NOPTS_VALUE)
2341 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2342 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2343 (AVRational){1, avctx->sample_rate});
2344 ist->nb_samples = decoded_frame->nb_samples;
2345 err = send_frame_to_filters(ist, decoded_frame);
2347 av_frame_unref(ist->filter_frame);
2348 av_frame_unref(decoded_frame);
2349 return err < 0 ? err : ret;
/*
 * Decode one video packet for input stream 'ist' (pkt == NULL drains the
 * decoder) and forward any produced frame to the stream's filtergraphs.
 * *got_output is set when a frame was emitted; 'eof' indicates the demuxer
 * has run out of data.  Returns the filter-delivery error if one occurred,
 * otherwise the decode status.
 * NOTE(review): this listing is an elided extract -- the embedded original
 * line numbers show where source lines are omitted.
 */
2352 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2355 AVFrame *decoded_frame;
2356 int i, ret = 0, err = 0;
2357 int64_t best_effort_timestamp;
2358 int64_t dts = AV_NOPTS_VALUE;
2361 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2362 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2364 if (!eof && pkt && pkt->size == 0)
/* lazily allocate the reusable frames owned by the InputStream */
2367 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2368 return AVERROR(ENOMEM);
2369 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2370 return AVERROR(ENOMEM);
2371 decoded_frame = ist->decoded_frame;
/* rescale the predicted dts (AV_TIME_BASE_Q) into the stream time base */
2372 if (ist->dts != AV_NOPTS_VALUE)
2373 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2376 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2379 // The old code used to set dts on the drain packet, which does not work
2380 // with the new API anymore.
/* queue the dts so a best-effort timestamp can be synthesized at EOF */
2382 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2384 return AVERROR(ENOMEM);
2385 ist->dts_buffer = new;
2386 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2389 update_benchmark(NULL);
2390 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2391 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2395 // The following line may be required in some cases where there is no parser
2396 // or the parser does not set has_b_frames correctly
2397 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2398 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2399 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2401 av_log(ist->dec_ctx, AV_LOG_WARNING,
2402 "video_delay is larger in decoder than demuxer %d > %d.\n"
2403 "If you want to help, upload a sample "
2404 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2405 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2406 ist->dec_ctx->has_b_frames,
2407 ist->st->codecpar->video_delay);
2410 if (ret != AVERROR_EOF)
2411 check_decode_result(ist, got_output, ret);
/* debug aid: report decoder output size/format differing from the context */
2413 if (*got_output && ret >= 0) {
2414 if (ist->dec_ctx->width != decoded_frame->width ||
2415 ist->dec_ctx->height != decoded_frame->height ||
2416 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2417 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2418 decoded_frame->width,
2419 decoded_frame->height,
2420 decoded_frame->format,
2421 ist->dec_ctx->width,
2422 ist->dec_ctx->height,
2423 ist->dec_ctx->pix_fmt);
2427 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top option) */
2430 if(ist->top_field_first>=0)
2431 decoded_frame->top_field_first = ist->top_field_first;
2433 ist->frames_decoded++;
/* download the frame from a hw surface when a retrieval hook is installed */
2435 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2436 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2440 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2442 best_effort_timestamp= decoded_frame->best_effort_timestamp;
/* with a user-forced CFR input frame rate, timestamps are simply counted */
2444 if (ist->framerate.num)
2445 best_effort_timestamp = ist->cfr_next_pts++;
/* at EOF with no timestamp, fall back to the oldest queued packet dts
 * (see dts_buffer above) and shift the queue down by one */
2447 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2448 best_effort_timestamp = ist->dts_buffer[0];
2450 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2451 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2452 ist->nb_dts_buffer--;
2455 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2456 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2458 if (ts != AV_NOPTS_VALUE)
2459 ist->next_pts = ist->pts = ts;
2463 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2464 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2465 ist->st->index, av_ts2str(decoded_frame->pts),
2466 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2467 best_effort_timestamp,
2468 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2469 decoded_frame->key_frame, decoded_frame->pict_type,
2470 ist->st->time_base.num, ist->st->time_base.den);
2473 if (ist->st->sample_aspect_ratio.num)
2474 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2476 err = send_frame_to_filters(ist, decoded_frame);
2479 av_frame_unref(ist->filter_frame);
2480 av_frame_unref(decoded_frame);
/* a filter-delivery failure takes precedence over the decode status */
2481 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally adjust the previous subtitle's
 * display duration (-fix_sub_duration), feed the result to sub2video or
 * queue it for filters, and send it to every subtitle encoder that takes
 * input from this stream.
 * NOTE(review): elided extract -- the embedded line numbers show gaps.
 */
2484 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2487 AVSubtitle subtitle;
2489 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2490 &subtitle, got_output, pkt);
2492 check_decode_result(NULL, got_output, ret);
2494 if (ret < 0 || !*got_output) {
2497 sub2video_flush(ist);
/* -fix_sub_duration: cap the previous subtitle's duration to the gap
 * (in ms) between its pts and the current subtitle's pts */
2501 if (ist->fix_sub_duration) {
2503 if (ist->prev_sub.got_output) {
2504 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2505 1000, AV_TIME_BASE);
2506 if (end < ist->prev_sub.subtitle.end_display_time) {
2507 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2508 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2509 ist->prev_sub.subtitle.end_display_time, end,
2510 end <= 0 ? ", dropping it" : "");
2511 ist->prev_sub.subtitle.end_display_time = end;
/* delay output by one subtitle: emit the previous one, stash the current */
2514 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2515 FFSWAP(int, ret, ist->prev_sub.ret);
2516 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video path: render now if the frame exists, otherwise queue the
 * subtitle (growing the FIFO on demand) until the filtergraph is ready */
2524 if (ist->sub2video.frame) {
2525 sub2video_update(ist, &subtitle);
2526 } else if (ist->nb_filters) {
2527 if (!ist->sub2video.sub_queue)
2528 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2529 if (!ist->sub2video.sub_queue)
2531 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2532 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2536 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2540 if (!subtitle.num_rects)
2543 ist->frames_decoded++;
/* fan the decoded subtitle out to every matching subtitle encoder */
2545 for (i = 0; i < nb_output_streams; i++) {
2546 OutputStream *ost = output_streams[i];
2548 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2549 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2552 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2557 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input fed by this input stream via
 * ifilter_send_eof().  NOTE(review): the error-propagation lines are
 * elided here -- presumably a failing send is returned to the caller;
 * confirm against the full source.
 */
2561 static int send_filter_eof(InputStream *ist)
2564 for (i = 0; i < ist->nb_filters; i++) {
2565 ret = ifilter_send_eof(ist->filters[i]);
2572 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Main per-packet dispatcher: maintains the input stream's dts/pts
 * predictions, runs the appropriate decoder (audio/video/subtitle) when
 * decoding is needed, propagates EOF to filters, and handles the
 * stream-copy path (timestamp advancement + do_streamcopy).
 * Returns nonzero while there is more to process, 0 once EOF is reached.
 * NOTE(review): elided extract -- the embedded line numbers show gaps.
 */
2573 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2577 int eof_reached = 0;
/* first packet: seed ist->dts; start negative by the reorder delay so
 * that frame 0 lands at/near timestamp 0 after decoder delay */
2580 if (!ist->saw_first_ts) {
2581 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2583 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2584 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2585 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2587 ist->saw_first_ts = 1;
2590 if (ist->next_dts == AV_NOPTS_VALUE)
2591 ist->next_dts = ist->dts;
2592 if (ist->next_pts == AV_NOPTS_VALUE)
2593 ist->next_pts = ist->pts;
2597 av_init_packet(&avpkt);
/* resync the predicted timestamps to the packet dts when available */
2604 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2605 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2606 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2607 ist->next_pts = ist->pts = ist->dts;
2610 // while we have more to decode or while the decoder did output something on EOF
2611 while (ist->decoding_needed) {
2612 int64_t duration = 0;
2614 int decode_failed = 0;
2616 ist->pts = ist->next_pts;
2617 ist->dts = ist->next_dts;
2619 switch (ist->dec_ctx->codec_type) {
2620 case AVMEDIA_TYPE_AUDIO:
2621 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2624 case AVMEDIA_TYPE_VIDEO:
2625 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* estimate the frame duration: packet duration first, else derive it
 * from the decoder frame rate and the parser's repeat_pict */
2627 if (!repeating || !pkt || got_output) {
2628 if (pkt && pkt->duration) {
2629 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2631 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2632 duration = ((int64_t)AV_TIME_BASE *
2633 ist->dec_ctx->framerate.den * ticks) /
2634 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2637 if(ist->dts != AV_NOPTS_VALUE && duration) {
2638 ist->next_dts += duration;
2640 ist->next_dts = AV_NOPTS_VALUE;
2644 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2646 case AVMEDIA_TYPE_SUBTITLE:
2649 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2650 if (!pkt && ret >= 0)
2657 if (ret == AVERROR_EOF) {
2663 if (decode_failed) {
2664 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2665 ist->file_index, ist->st->index, av_err2str(ret));
2667 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2668 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2670 if (!decode_failed || exit_on_error)
2676 ist->got_output = 1;
2681 // During draining, we might get multiple output frames in this loop.
2682 // ffmpeg.c does not drain the filter chain on configuration changes,
2683 // which means if we send multiple frames at once to the filters, and
2684 // one of those frames changes configuration, the buffered frames will
2685 // be lost. This can upset certain FATE tests.
2686 // Decode only 1 frame per call on EOF to appease these FATE tests.
2687 // The ideal solution would be to rewrite decoding to use the new
2688 // decoding API in a better way.
2695 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2696 /* except when looping we need to flush but not to send an EOF */
2697 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2698 int ret = send_filter_eof(ist);
2700 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2705 /* handle stream copy */
2706 if (!ist->decoding_needed) {
2707 ist->dts = ist->next_dts;
2708 switch (ist->dec_ctx->codec_type) {
2709 case AVMEDIA_TYPE_AUDIO:
2710 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2711 ist->dec_ctx->sample_rate;
2713 case AVMEDIA_TYPE_VIDEO:
/* advance next_dts by one frame: forced cfr rate, packet duration, or
 * decoder frame rate (with repeat_pict ticks), in that order */
2714 if (ist->framerate.num) {
2715 // TODO: Remove work-around for c99-to-c89 issue 7
2716 AVRational time_base_q = AV_TIME_BASE_Q;
2717 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2718 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2719 } else if (pkt->duration) {
2720 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721 } else if(ist->dec_ctx->framerate.num != 0) {
2722 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2723 ist->next_dts += ((int64_t)AV_TIME_BASE *
2724 ist->dec_ctx->framerate.den * ticks) /
2725 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2729 ist->pts = ist->dts;
2730 ist->next_pts = ist->next_dts;
/* copy the packet to every stream-copy output fed by this input */
2732 for (i = 0; pkt && i < nb_output_streams; i++) {
2733 OutputStream *ost = output_streams[i];
2735 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2738 do_streamcopy(ist, ost, pkt);
2741 return !eof_reached;
/*
 * Generate an SDP description covering every RTP output and either print
 * it to stdout or write it to -sdp_file.  Bails out (per the elided
 * early-return lines) while any output file's header is still unwritten.
 */
2744 static void print_sdp(void)
2749 AVIOContext *sdp_pb;
2750 AVFormatContext **avc;
/* all muxers must have written their headers before an SDP is valid */
2752 for (i = 0; i < nb_output_files; i++) {
2753 if (!output_files[i]->header_written)
2757 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP outputs; j counts how many were found */
2760 for (i = 0, j = 0; i < nb_output_files; i++) {
2761 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2762 avc[j] = output_files[i]->ctx;
2770 av_sdp_create(avc, j, sdp, sizeof(sdp));
2772 if (!sdp_filename) {
2773 printf("SDP:\n%s\n", sdp);
2776 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2777 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2779 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2780 avio_closep(&sdp_pb);
2781 av_freep(&sdp_filename);
/*
 * Look up the entry in the global 'hwaccels' table whose pixel format
 * matches pix_fmt; the table is terminated by a NULL name.  The no-match
 * return is elided here -- presumably NULL; confirm against full source.
 */
2789 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2792 for (i = 0; hwaccels[i].name; i++)
2793 if (hwaccels[i].pix_fmt == pix_fmt)
2794 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick one, initializing a matching hwaccel for hardware
 * formats.  A hwaccel that was explicitly requested but fails to init is
 * fatal (AV_PIX_FMT_NONE); otherwise the loop falls through to the next
 * candidate.  The InputStream is smuggled in via s->opaque.
 */
2798 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2800 InputStream *ist = s->opaque;
2801 const enum AVPixelFormat *p;
/* candidate list is terminated by -1 (AV_PIX_FMT_NONE) */
2804 for (p = pix_fmts; *p != -1; p++) {
2805 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2806 const HWAccel *hwaccel;
/* software formats need no setup -- the elided branch handles them */
2808 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2811 hwaccel = get_hwaccel(*p);
/* skip hwaccels that conflict with an active or user-selected one */
2813 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2814 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2817 ret = hwaccel->init(s);
/* init failed: fatal only if this exact hwaccel was requested */
2819 if (ist->hwaccel_id == hwaccel->id) {
2820 av_log(NULL, AV_LOG_FATAL,
2821 "%s hwaccel requested for input stream #%d:%d, "
2822 "but cannot be initialized.\n", hwaccel->name,
2823 ist->file_index, ist->st->index);
2824 return AV_PIX_FMT_NONE;
/* hand an existing hw frames context to the decoder */
2829 if (ist->hw_frames_ctx) {
2830 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2831 if (!s->hw_frames_ctx)
2832 return AV_PIX_FMT_NONE;
2835 ist->active_hwaccel_id = hwaccel->id;
2836 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * hwaccel's buffer hook for hardware-format frames, otherwise fall back
 * to avcodec's default allocator.
 */
2843 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2845 InputStream *ist = s->opaque;
2847 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2848 return ist->hwaccel_get_buffer(s, frame, flags);
2850 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' when decoding is needed:
 * install the hwaccel-aware callbacks, apply stream-type-specific decoder
 * options, and open the codec.  On failure a human-readable message is
 * written into 'error' (up to error_len) and a negative AVERROR returned.
 */
2853 static int init_input_stream(int ist_index, char *error, int error_len)
2856 InputStream *ist = input_streams[ist_index];
2858 if (ist->decoding_needed) {
2859 AVCodec *codec = ist->dec;
2861 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2862 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2863 return AVERROR(EINVAL);
/* hook the hwaccel-aware format/buffer callbacks; opaque carries ist */
2866 ist->dec_ctx->opaque = ist;
2867 ist->dec_ctx->get_format = get_format;
2868 ist->dec_ctx->get_buffer2 = get_buffer;
2869 ist->dec_ctx->thread_safe_callbacks = 1;
2871 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles destined for an output need end-display-time computed */
2872 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2873 (ist->decoding_needed & DECODING_FOR_OST)) {
2874 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2875 if (ist->decoding_needed & DECODING_FOR_FILTER)
2876 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2879 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2881 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2882 * audio, and video decoders such as cuvid or mediacodec */
2883 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* default to automatic thread-count selection unless the user set one */
2885 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2886 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2887 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2888 if (ret == AVERROR_EXPERIMENTAL)
2889 abort_codec_experimental(codec, 0);
2891 snprintf(error, error_len,
2892 "Error while opening decoder for input stream "
2894 ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder options still in the dict were not consumed -> fatal */
2897 assert_avoptions(ist->decoder_opts);
2900 ist->next_pts = AV_NOPTS_VALUE;
2901 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream that feeds this output stream.  The fallthrough
 * case is elided here -- presumably NULL when source_index < 0 (e.g.
 * filter-only outputs); confirm against the full source.
 */
2906 static InputStream *get_input_stream(OutputStream *ost)
2908 if (ost->source_index >= 0)
2909 return input_streams[ost->source_index];
/* qsort()-compatible ascending comparator for int64_t values. */
2913 static int compare_int64(const void *a, const void *b)
2915 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2918 /* open the muxer when all the streams are initialized */
/*
 * Once every stream of output file 'of' is initialized, write the muxer
 * header, optionally emit the SDP, and flush the packets that were queued
 * per-stream while the muxer was not yet ready.  Returns early (elided)
 * while any stream is still uninitialized.
 */
2919 static int check_init_output_file(OutputFile *of, int file_index)
2923 for (i = 0; i < of->ctx->nb_streams; i++) {
2924 OutputStream *ost = output_streams[of->ost_index + i];
2925 if (!ost->initialized)
2929 of->ctx->interrupt_callback = int_cb;
2931 ret = avformat_write_header(of->ctx, &of->opts);
2933 av_log(NULL, AV_LOG_ERROR,
2934 "Could not write header for output file #%d "
2935 "(incorrect codec parameters ?): %s\n",
2936 file_index, av_err2str(ret));
2939 //assert_avoptions(of->opts);
2940 of->header_written = 1;
2942 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2944 if (sdp_filename || want_sdp)
2947 /* flush the muxing queues */
2948 for (i = 0; i < of->ctx->nb_streams; i++) {
2949 OutputStream *ost = output_streams[of->ost_index + i];
2951 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2952 if (!av_fifo_size(ost->muxing_queue))
2953 ost->mux_timebase = ost->st->time_base;
/* drain every packet buffered before the header could be written */
2955 while (av_fifo_size(ost->muxing_queue)) {
2957 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2958 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * each filter's input parameters/time base come from the previous filter's
 * output (or the stream itself for the first one), and the last filter's
 * output parameters/time base are copied back onto the stream.
 * No-op when the stream has no bitstream filters.
 */
2965 static int init_output_bsfs(OutputStream *ost)
2970 if (!ost->nb_bitstream_filters)
2973 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2974 ctx = ost->bsf_ctx[i];
/* chain: filter i consumes what filter i-1 produced */
2976 ret = avcodec_parameters_copy(ctx->par_in,
2977 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2981 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2983 ret = av_bsf_init(ctx);
2985 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2986 ost->bsf_ctx[i]->filter->name);
/* the stream finally carries the last filter's output parameters */
2991 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2992 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2996 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: copy the input codec
 * parameters (optionally overridden by user encoder options), pick a
 * codec tag the output container accepts, transfer timing info, side
 * data, disposition, and apply per-codec-type fixups.
 * NOTE(review): elided extract -- the embedded line numbers show gaps.
 */
3001 static int init_output_stream_streamcopy(OutputStream *ost)
3003 OutputFile *of = output_files[ost->file_index];
3004 InputStream *ist = get_input_stream(ost);
3005 AVCodecParameters *par_dst = ost->st->codecpar;
3006 AVCodecParameters *par_src = ost->ref_par;
3009 uint32_t codec_tag = par_dst->codec_tag;
/* stream copy requires a source stream and must not go through filters */
3011 av_assert0(ist && !ost->filter);
3013 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3015 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3017 av_log(NULL, AV_LOG_FATAL,
3018 "Error setting up codec context options.\n");
3021 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* keep the source tag only if the output container can represent it */
3024 unsigned int codec_tag_tmp;
3025 if (!of->ctx->oformat->codec_tag ||
3026 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3027 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3028 codec_tag = par_src->codec_tag;
3031 ret = avcodec_parameters_copy(par_dst, par_src);
3035 par_dst->codec_tag = codec_tag;
3037 if (!ost->frame_rate.num)
3038 ost->frame_rate = ist->framerate;
3039 ost->st->avg_frame_rate = ost->frame_rate;
3041 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3045 // copy timebase while removing common factors
3046 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3047 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3049 // copy estimated duration as a hint to the muxer
3050 if (ost->st->duration <= 0 && ist->st->duration > 0)
3051 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3054 ost->st->disposition = ist->st->disposition;
/* mirror all packet side data from the input stream */
3056 if (ist->st->nb_side_data) {
3057 for (i = 0; i < ist->st->nb_side_data; i++) {
3058 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3061 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3063 return AVERROR(ENOMEM);
3064 memcpy(dst_data, sd_src->data, sd_src->size);
/* a user-forced rotation replaces/overrides the display matrix */
3068 if (ost->rotate_overridden) {
3069 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3070 sizeof(int32_t) * 9);
3072 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3075 ost->parser = av_parser_init(par_dst->codec_id);
3076 ost->parser_avctx = avcodec_alloc_context3(NULL);
3077 if (!ost->parser_avctx)
3078 return AVERROR(ENOMEM);
3080 switch (par_dst->codec_type) {
3081 case AVMEDIA_TYPE_AUDIO:
3082 if (audio_volume != 256) {
3083 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* known-bogus block_align values for MP3/AC3 would confuse muxers */
3086 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3087 par_dst->block_align= 0;
3088 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3089 par_dst->block_align= 0;
3091 case AVMEDIA_TYPE_VIDEO:
3092 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3094 av_mul_q(ost->frame_aspect_ratio,
3095 (AVRational){ par_dst->height, par_dst->width });
3096 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3097 "with stream copy may produce invalid files\n");
3099 else if (ist->st->sample_aspect_ratio.num)
3100 sar = ist->st->sample_aspect_ratio;
3102 sar = par_src->sample_aspect_ratio;
3103 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3104 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3105 ost->st->r_frame_rate = ist->st->r_frame_rate;
3109 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to identify the
 * encoder (full LIBAVCODEC_IDENT normally, bare "Lavc" in bitexact mode).
 * Does nothing if the user already supplied an "encoder" tag.
 */
3114 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3116 AVDictionaryEntry *e;
3118 uint8_t *encoder_string;
3119 int encoder_string_len;
3120 int format_flags = 0;
3121 int codec_flags = 0;
/* respect an explicit user-provided encoder tag */
3123 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate the fflags/flags option strings to detect bitexact mode */
3126 e = av_dict_get(of->opts, "fflags", NULL, 0);
3128 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3131 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3133 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3135 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3138 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* " " separator + NUL accounted for by the +2 */
3141 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3142 encoder_string = av_mallocz(encoder_string_len);
3143 if (!encoder_string)
/* bitexact output must not embed the version for reproducibility */
3146 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3147 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3149 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3150 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3151 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3152 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification 'kf' (comma-separated times,
 * plus the "chapters[+offset]" shorthand that expands to every chapter
 * start of the output file) into a sorted array of timestamps in the
 * encoder time base, stored in ost->forced_kf_pts / forced_kf_count.
 * Fatal on allocation failure.
 */
3155 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3156 AVCodecContext *avctx)
3159 int n = 1, i, size, index = 0;
/* first pass (elided body): count entries by counting separators */
3162 for (p = kf; *p; p++)
3166 pts = av_malloc_array(size, sizeof(*pts));
3168 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3173 for (i = 0; i < n; i++) {
3174 char *next = strchr(p, ',');
/* "chapters[+offset]": one keyframe per chapter start, shifted by offset */
3179 if (!memcmp(p, "chapters", 8)) {
3181 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array; the "chapters" token itself is replaced, hence -1 */
3184 if (avf->nb_chapters > INT_MAX - size ||
3185 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3187 av_log(NULL, AV_LOG_FATAL,
3188 "Could not allocate forced key frames array.\n");
3191 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3192 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3194 for (j = 0; j < avf->nb_chapters; j++) {
3195 AVChapter *c = avf->chapters[j];
3196 av_assert1(index < size);
3197 pts[index++] = av_rescale_q(c->start, c->time_base,
3198 avctx->time_base) + t;
/* plain timestamp entry */
3203 t = parse_time_or_die("force_key_frames", p, 1);
3204 av_assert1(index < size);
3205 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* keep the list sorted so the encoder can scan it monotonically */
3212 av_assert0(index == size);
3213 qsort(pts, size, sizeof(*pts), compare_int64);
3214 ost->forced_kf_count = size;
3215 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base for an output stream: a user-specified
 * -enc_time_base (num > 0) wins; -enc_time_base -1 (num < 0) copies the
 * input stream's time base when available (warning + default otherwise);
 * else the supplied per-media-type default is used.
 */
3218 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3220 InputStream *ist = get_input_stream(ost);
3221 AVCodecContext *enc_ctx = ost->enc_ctx;
3222 AVFormatContext *oc;
/* explicit user-provided time base */
3224 if (ost->enc_timebase.num > 0) {
3225 enc_ctx->time_base = ost->enc_timebase;
/* negative num means "copy from the input stream" */
3229 if (ost->enc_timebase.num < 0) {
3231 enc_ctx->time_base = ist->st->time_base;
3235 oc = output_files[ost->file_index]->ctx;
3236 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3239 enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context of an output stream before opening it:
 * disposition, frame rate selection, sample/pixel format, dimensions,
 * time base, forced keyframes, and per-media-type parameters, mostly
 * pulled from the buffersink feeding the encoder.
 * NOTE(review): elided extract -- the embedded line numbers show gaps.
 */
3242 static int init_output_stream_encode(OutputStream *ost)
3244 InputStream *ist = get_input_stream(ost);
3245 AVCodecContext *enc_ctx = ost->enc_ctx;
3246 AVCodecContext *dec_ctx = NULL;
3247 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3250 set_encoder_id(output_files[ost->file_index], ost);
3252 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3253 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3254 // which have to be filtered out to prevent leaking them to output files.
3255 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3258 ost->st->disposition = ist->st->disposition;
3260 dec_ctx = ist->dec_ctx;
3262 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its type in the file, mark it default */
3264 for (j = 0; j < oc->nb_streams; j++) {
3265 AVStream *st = oc->streams[j];
3266 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3269 if (j == oc->nb_streams)
3270 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3271 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3272 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* frame rate fallback chain: -r, filter output, input -r, input
 * r_frame_rate, and finally a hardcoded 25 fps with a warning */
3275 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3276 if (!ost->frame_rate.num)
3277 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3278 if (ist && !ost->frame_rate.num)
3279 ost->frame_rate = ist->framerate;
3280 if (ist && !ost->frame_rate.num)
3281 ost->frame_rate = ist->st->r_frame_rate;
3282 if (ist && !ost->frame_rate.num) {
3283 ost->frame_rate = (AVRational){25, 1};
3284 av_log(NULL, AV_LOG_WARNING,
3286 "about the input framerate is available. Falling "
3287 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3288 "if you want a different framerate.\n",
3289 ost->file_index, ost->index);
3291 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the nearest frame rate the encoder supports, unless forced */
3292 if (ost->enc->supported_framerates && !ost->force_fps) {
3293 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3294 ost->frame_rate = ost->enc->supported_framerates[idx];
3296 // reduce frame rate for mpeg4 to be within the spec limits
3297 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3298 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3299 ost->frame_rate.num, ost->frame_rate.den, 65535);
3303 switch (enc_ctx->codec_type) {
3304 case AVMEDIA_TYPE_AUDIO:
/* audio format/rate/layout all come from the buffersink */
3305 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3307 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3308 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3309 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3310 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3311 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3313 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3316 case AVMEDIA_TYPE_VIDEO:
3317 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3319 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3320 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3321 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3322 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3323 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3324 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* forced keyframe times were parsed in AV_TIME_BASE_Q; rescale them */
3326 for (j = 0; j < ost->forced_kf_count; j++)
3327 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3329 enc_ctx->time_base);
3331 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3332 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3333 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3334 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3335 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3336 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* warn about auto-picked non-yuv420p formats with picky players */
3337 if (!strncmp(ost->enc->name, "libx264", 7) &&
3338 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3339 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3340 av_log(NULL, AV_LOG_WARNING,
3341 "No pixel format specified, %s for H.264 encoding chosen.\n"
3342 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3343 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3344 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3345 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3346 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3347 av_log(NULL, AV_LOG_WARNING,
3348 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3349 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3350 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3351 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3353 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3354 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3356 enc_ctx->framerate = ost->frame_rate;
3358 ost->st->avg_frame_rate = ost->frame_rate;
/* geometry/format changed relative to the input: raw-sample depth from
 * the decoder is no longer meaningful, use the user's -bits_per_raw_sample */
3361 enc_ctx->width != dec_ctx->width ||
3362 enc_ctx->height != dec_ctx->height ||
3363 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3364 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" is evaluated per-frame, "source" keeps
 * input keyframes, anything else is a static timestamp list */
3367 if (ost->forced_keyframes) {
3368 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3369 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3370 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3372 av_log(NULL, AV_LOG_ERROR,
3373 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3376 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3377 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3378 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3379 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3381 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3382 // parse it only for static kf timings
3383 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3384 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3388 case AVMEDIA_TYPE_SUBTITLE:
3389 enc_ctx->time_base = AV_TIME_BASE_Q;
3390 if (!enc_ctx->width) {
3391 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3392 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3395 case AVMEDIA_TYPE_DATA:
3402 ost->mux_timebase = enc_ctx->time_base;
3407 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3411 if (ost->encoding_needed) {
3412 AVCodec *codec = ost->enc;
3413 AVCodecContext *dec = NULL;
3416 ret = init_output_stream_encode(ost);
3420 if ((ist = get_input_stream(ost)))
3422 if (dec && dec->subtitle_header) {
3423 /* ASS code assumes this buffer is null terminated so add extra byte. */
3424 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3425 if (!ost->enc_ctx->subtitle_header)
3426 return AVERROR(ENOMEM);
3427 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3428 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3430 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3431 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3432 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3434 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3435 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3436 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3438 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3439 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3440 av_buffersink_get_format(ost->filter->filter)) {
3441 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3442 if (!ost->enc_ctx->hw_frames_ctx)
3443 return AVERROR(ENOMEM);
3446 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3447 if (ret == AVERROR_EXPERIMENTAL)
3448 abort_codec_experimental(codec, 1);
3449 snprintf(error, error_len,
3450 "Error while opening encoder for output stream #%d:%d - "
3451 "maybe incorrect parameters such as bit_rate, rate, width or height",
3452 ost->file_index, ost->index);
3455 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3456 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3457 av_buffersink_set_frame_size(ost->filter->filter,
3458 ost->enc_ctx->frame_size);
3459 assert_avoptions(ost->encoder_opts);
3460 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3461 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3462 " It takes bits/s as argument, not kbits/s\n");
3464 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3466 av_log(NULL, AV_LOG_FATAL,
3467 "Error initializing the output stream codec context.\n");
3471 * FIXME: ost->st->codec should't be needed here anymore.
3473 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3477 if (ost->enc_ctx->nb_coded_side_data) {
3480 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3481 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3484 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3486 return AVERROR(ENOMEM);
3487 memcpy(dst_data, sd_src->data, sd_src->size);
3492 * Add global input side data. For now this is naive, and copies it
3493 * from the input stream's global side data. All side data should
3494 * really be funneled over AVFrame and libavfilter, then added back to
3495 * packet side data, and then potentially using the first packet for
3500 for (i = 0; i < ist->st->nb_side_data; i++) {
3501 AVPacketSideData *sd = &ist->st->side_data[i];
3502 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3504 return AVERROR(ENOMEM);
3505 memcpy(dst, sd->data, sd->size);
3506 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3507 av_display_rotation_set((uint32_t *)dst, 0);
3511 // copy timebase while removing common factors
3512 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3513 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3515 // copy estimated duration as a hint to the muxer
3516 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3517 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3519 ost->st->codec->codec= ost->enc_ctx->codec;
3520 } else if (ost->stream_copy) {
3521 ret = init_output_stream_streamcopy(ost);
3526 * FIXME: will the codec context used by the parser during streamcopy
3527 * This should go away with the new parser API.
3529 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3534 // parse user provided disposition, and update stream values
3535 if (ost->disposition) {
3536 static const AVOption opts[] = {
3537 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3538 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3539 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3540 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3541 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3542 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3543 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3544 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3545 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3546 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3547 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3548 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3549 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3550 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3553 static const AVClass class = {
3555 .item_name = av_default_item_name,
3557 .version = LIBAVUTIL_VERSION_INT,
3559 const AVClass *pclass = &class;
3561 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3566 /* initialize bitstream filters for the output stream
3567 * needs to be done here, because the codec id for streamcopy is not
3568 * known until now */
3569 ret = init_output_bsfs(ost);
3573 ost->initialized = 1;
3575 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (at most once per stream index) that a packet references a stream
 * which appeared after initial stream discovery; such streams are ignored
 * by the caller. */
3582 static void report_new_stream(int input_index, AVPacket *pkt)
3584 InputFile *file = input_files[input_index];
3585 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this (or a higher) stream index — stay quiet */
3587 if (pkt->stream_index < file->nb_streams_warn)
3589 av_log(file->ctx, AV_LOG_WARNING,
3590 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3591 av_get_media_type_string(st->codecpar->codec_type),
3592 input_index, pkt->stream_index,
3593 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index we have warned about */
3594 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode main loop: bind filtergraph outputs to
 * input streams, start framerate emulation, open decoders and (non-filtered)
 * encoders, discard unused programs, write headers for stream-less outputs
 * and dump the stream mapping.  Returns 0 on success, <0 on error. */
3597 static int transcode_init(void)
3599 int ret = 0, i, j, k;
3600 AVFormatContext *oc;
3603 char error[1024] = {0};
/* give a source_index to outputs of single-input filtergraphs that lack one,
 * using the input stream feeding the graph */
3605 for (i = 0; i < nb_filtergraphs; i++) {
3606 FilterGraph *fg = filtergraphs[i];
3607 for (j = 0; j < fg->nb_outputs; j++) {
3608 OutputFilter *ofilter = fg->outputs[j];
3609 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3611 if (fg->nb_inputs != 1)
3613 for (k = nb_input_streams-1; k >= 0 ; k--)
3614 if (fg->inputs[0]->ist == input_streams[k])
3616 ofilter->ost->source_index = k;
3620 /* init framerate emulation */
3621 for (i = 0; i < nb_input_files; i++) {
3622 InputFile *ifile = input_files[i];
/* -re: record a wallclock start time per stream for pacing in get_input_packet() */
3623 if (ifile->rate_emu)
3624 for (j = 0; j < ifile->nb_streams; j++)
3625 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3628 /* init input streams */
3629 for (i = 0; i < nb_input_streams; i++)
3630 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* decoder init failed: close every encoder context before bailing out */
3631 for (i = 0; i < nb_output_streams; i++) {
3632 ost = output_streams[i];
3633 avcodec_close(ost->enc_ctx);
3638 /* open each encoder */
3639 for (i = 0; i < nb_output_streams; i++) {
3640 // skip streams fed from filtergraphs until we have a frame for them
3641 if (output_streams[i]->filter)
3644 ret = init_output_stream(output_streams[i], error, sizeof(error));
3649 /* discard unused programs */
3650 for (i = 0; i < nb_input_files; i++) {
3651 InputFile *ifile = input_files[i];
3652 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3653 AVProgram *p = ifile->ctx->programs[j];
3654 int discard = AVDISCARD_ALL;
/* keep a program as soon as one of its streams is actually used */
3656 for (k = 0; k < p->nb_stream_indexes; k++)
3657 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3658 discard = AVDISCARD_DEFAULT;
3661 p->discard = discard;
3665 /* write headers for files with no streams */
3666 for (i = 0; i < nb_output_files; i++) {
3667 oc = output_files[i]->ctx;
3668 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3669 ret = check_init_output_file(output_files[i], i);
3676 /* dump the stream mapping */
3677 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3678 for (i = 0; i < nb_input_streams; i++) {
3679 ist = input_streams[i];
3681 for (j = 0; j < ist->nb_filters; j++) {
/* complex filtergraph inputs are reported here; simple ones below */
3682 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3683 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3684 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3685 ist->filters[j]->name);
3686 if (nb_filtergraphs > 1)
3687 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3688 av_log(NULL, AV_LOG_INFO, "\n");
3693 for (i = 0; i < nb_output_streams; i++) {
3694 ost = output_streams[i];
3696 if (ost->attachment_filename) {
3697 /* an attached file */
3698 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3699 ost->attachment_filename, ost->file_index, ost->index);
3703 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3704 /* output from a complex graph */
3705 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3706 if (nb_filtergraphs > 1)
3707 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3709 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3710 ost->index, ost->enc ? ost->enc->name : "?");
3714 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3715 input_streams[ost->source_index]->file_index,
3716 input_streams[ost->source_index]->st->index,
3719 if (ost->sync_ist != input_streams[ost->source_index])
3720 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3721 ost->sync_ist->file_index,
3722 ost->sync_ist->st->index);
3723 if (ost->stream_copy)
3724 av_log(NULL, AV_LOG_INFO, " (copy)");
3726 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3727 const AVCodec *out_codec = ost->enc;
3728 const char *decoder_name = "?";
3729 const char *in_codec_name = "?";
3730 const char *encoder_name = "?";
3731 const char *out_codec_name = "?";
3732 const AVCodecDescriptor *desc;
/* report "native" when the chosen coder is the default one for the codec */
3735 decoder_name = in_codec->name;
3736 desc = avcodec_descriptor_get(in_codec->id);
3738 in_codec_name = desc->name;
3739 if (!strcmp(decoder_name, in_codec_name))
3740 decoder_name = "native";
3744 encoder_name = out_codec->name;
3745 desc = avcodec_descriptor_get(out_codec->id);
3747 out_codec_name = desc->name;
3748 if (!strcmp(encoder_name, out_codec_name))
3749 encoder_name = "native";
3752 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3753 in_codec_name, decoder_name,
3754 out_codec_name, encoder_name);
3756 av_log(NULL, AV_LOG_INFO, "\n");
3760 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* publish that initialization completed (read elsewhere, e.g. by reporting) */
3764 atomic_store(&transcode_init_done, 1);
3769 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3770 static int need_output(void)
3774 for (i = 0; i < nb_output_streams; i++) {
3775 OutputStream *ost = output_streams[i];
3776 OutputFile *of = output_files[ost->file_index];
3777 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* a stream needs no more output once finished, or once its file hit
 * the -fs (limit_filesize) cap */
3779 if (ost->finished ||
3780 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of that output file */
3782 if (ost->frame_number >= ost->max_frames) {
3784 for (j = 0; j < of->ctx->nb_streams; j++)
3785 close_output_stream(output_streams[of->ost_index + j]);
3796 * Select the output stream to process.
3798 * @return selected output stream, or NULL if none available
3800 static OutputStream *choose_output(void)
3803 int64_t opts_min = INT64_MAX;
3804 OutputStream *ost_min = NULL;
3806 for (i = 0; i < nb_output_streams; i++) {
3807 OutputStream *ost = output_streams[i];
/* streams without a dts yet sort first (INT64_MIN) so they get fed */
3808 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3809 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3811 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3812 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* a not-yet-initialized stream whose inputs are not done must still be chosen */
3814 if (!ost->initialized && !ost->inputs_done)
/* pick the unfinished stream with the smallest dts; an unavailable
 * (EAGAIN) stream yields NULL so the caller waits */
3817 if (!ost->finished && opts < opts_min) {
3819 ost_min = ost->unavailable ? NULL : ost;
/* Enable or disable terminal echo on stdin (fd 0); a no-op when stdin
 * is not a terminal (tcgetattr fails). */
3825 static void set_tty_echo(int on)
3829 if (tcgetattr(0, &tty) == 0) {
3830 if (on) tty.c_lflag |= ECHO;
3831 else tty.c_lflag &= ~ECHO;
3832 tcsetattr(0, TCSANOW, &tty);
/* Poll the controlling terminal for interactive keystrokes (at most every
 * 100 ms).  Handles verbosity, QP histogram, filter commands ('c'/'C'),
 * debug modes ('d'/'D') and help ('?').  Returns AVERROR_EXIT to request
 * termination (signal received or quit key), 0 otherwise. */
3837 static int check_keyboard_interaction(int64_t cur_time)
3840 static int64_t last_time;
3841 if (received_nb_signals)
3842 return AVERROR_EXIT;
3843 /* read_key() returns 0 on EOF */
3844 if(cur_time - last_time >= 100000 && !run_as_daemon){
3846 last_time = cur_time;
3850 return AVERROR_EXIT;
/* '+'/'-' adjust the libav* log level in steps of 10 */
3851 if (key == '+') av_log_set_level(av_log_get_level()+10);
3852 if (key == '-') av_log_set_level(av_log_get_level()-10);
3853 if (key == 's') qp_hist ^= 1;
3856 do_hex_dump = do_pkt_dump = 0;
3857 } else if(do_pkt_dump){
3861 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send a command to the first matching filter; 'C': to all of them */
3863 if (key == 'c' || key == 'C'){
3864 char buf[4096], target[64], command[256], arg[256] = {0};
3867 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read one line of input, character by character */
3870 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3875 fprintf(stderr, "\n");
3877 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3878 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3879 target, time, command, arg);
3880 for (i = 0; i < nb_filtergraphs; i++) {
3881 FilterGraph *fg = filtergraphs[i];
3884 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3885 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3886 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3887 } else if (key == 'c') {
3888 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3889 ret = AVERROR_PATCHWELCOME;
/* time >= 0: queue the command for later execution instead of sending now */
3891 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3893 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3898 av_log(NULL, AV_LOG_ERROR,
3899 "Parse error, at least 3 arguments were expected, "
3900 "only %d given in string '%s'\n", n, buf);
3903 if (key == 'd' || key == 'D'){
/* 'D': cycle to the next debug mode by shifting the current flag left */
3906 debug = input_streams[0]->st->codec->debug<<1;
3907 if(!debug) debug = 1;
3908 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
/* 'd': read a numeric debug value from the terminal */
3915 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3920 fprintf(stderr, "\n");
3921 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3922 fprintf(stderr,"error parsing debug value\n");
/* apply the new debug flags to every decoder and encoder context */
3924 for(i=0;i<nb_input_streams;i++) {
3925 input_streams[i]->st->codec->debug = debug;
3927 for(i=0;i<nb_output_streams;i++) {
3928 OutputStream *ost = output_streams[i];
3929 ost->enc_ctx->debug = debug;
3931 if(debug) av_log_set_level(AV_LOG_DEBUG);
3932 fprintf(stderr,"debug=%d\n", debug);
3935 fprintf(stderr, "key function\n"
3936 "? show this help\n"
3937 "+ increase verbosity\n"
3938 "- decrease verbosity\n"
3939 "c Send command to first matching filter supporting it\n"
3940 "C Send/Queue command to all matching filters\n"
3941 "D cycle through available debug modes\n"
3942 "h dump packets/hex press to cycle through the 3 states\n"
3944 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.  On read
 * error/EOF the error is propagated to the receiving side and the thread
 * stops. */
3951 static void *input_thread(void *arg)
3954 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3959 ret = av_read_frame(f->ctx, &pkt);
3961 if (ret == AVERROR(EAGAIN)) {
/* real error or EOF: make the consumer see it and stop reading */
3966 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3969 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* non-blocking send hit a full queue: retry and warn that
 * -thread_queue_size may be too small */
3970 if (flags && ret == AVERROR(EAGAIN)) {
3972 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3973 av_log(f->ctx, AV_LOG_WARNING,
3974 "Thread message queue blocking; consider raising the "
3975 "thread_queue_size option (current value: %d)\n",
3976 f->thread_queue_size);
3979 if (ret != AVERROR_EOF)
3980 av_log(f->ctx, AV_LOG_ERROR,
3981 "Unable to send packet to main thread: %s\n",
/* sending failed: the packet was not consumed, free it here */
3983 av_packet_unref(&pkt);
3984 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop every demuxer thread: signal EOF to the producer side, drain and
 * unref any queued packets, join the thread and free its queue. */
3992 static void free_input_threads(void)
3996 for (i = 0; i < nb_input_files; i++) {
3997 InputFile *f = input_files[i];
/* nothing to do for files that never got a thread/queue */
4000 if (!f || !f->in_thread_queue)
4002 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4003 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4004 av_packet_unref(&pkt);
4006 pthread_join(f->thread, NULL);
4008 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file; skipped entirely when there is
 * only a single input (the main loop then reads directly).  Returns 0 on
 * success, a negative AVERROR on failure. */
4012 static int init_input_threads(void)
4016 if (nb_input_files == 1)
4019 for (i = 0; i < nb_input_files; i++) {
4020 InputFile *f = input_files[i];
/* non-seekable inputs (except lavfi) must not block the main loop */
4022 if (f->ctx->pb ? !f->ctx->pb->seekable :
4023 strcmp(f->ctx->iformat->name, "lavfi"))
4024 f->non_blocking = 1;
4025 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4026 f->thread_queue_size, sizeof(AVPacket));
4030 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4031 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4032 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno-style code */
4033 return AVERROR(ret);
/* Receive one packet from the file's demuxer-thread queue (non-blocking
 * for inputs flagged non_blocking). */
4039 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4041 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4043 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for this input file.  With rate emulation (-re),
 * returns AVERROR(EAGAIN) while a stream is ahead of the elapsed wallclock
 * (the exact guard condition is not visible in this view).  With multiple
 * inputs, packets come from the per-file demuxer thread. */
4047 static int get_input_packet(InputFile *f, AVPacket *pkt)
4051 for (i = 0; i < f->nb_streams; i++) {
4052 InputStream *ist = input_streams[f->ist_index + i];
/* stream dts rescaled to microseconds vs. wallclock since ist->start */
4053 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4054 int64_t now = av_gettime_relative() - ist->start;
4056 return AVERROR(EAGAIN);
4061 if (nb_input_files > 1)
4062 return get_input_packet_mt(f, pkt);
4064 return av_read_frame(f->ctx, pkt);
/* Nonzero if any output stream was marked unavailable (EAGAIN) this round. */
4067 static int got_eagain(void)
4070 for (i = 0; i < nb_output_streams; i++)
4071 if (output_streams[i]->unavailable)
/* Clear per-file eagain and per-stream unavailable flags before retrying. */
4076 static void reset_eagain(void)
4079 for (i = 0; i < nb_input_files; i++)
4080 input_files[i]->eagain = 0;
4081 for (i = 0; i < nb_output_streams; i++)
4082 output_streams[i]->unavailable = 0;
4085 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4086 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4087 AVRational time_base)
4093 return tmp_time_base;
/* compare the two candidate durations, each in its own time base */
4096 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4099 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to the start, flush
 * decoders, then compute this iteration's duration (stored via duration_max
 * into ifile->duration) so the next iteration's timestamps can be offset. */
4105 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4108 AVCodecContext *avctx;
4109 int i, ret, has_audio = 0;
4110 int64_t duration = 0;
4112 ret = av_seek_frame(is, -1, is->start_time, 0);
4116 for (i = 0; i < ifile->nb_streams; i++) {
4117 ist = input_streams[ifile->ist_index + i];
4118 avctx = ist->dec_ctx;
/* flush the decoder so the looped packets start from a clean state */
4121 if (ist->decoding_needed) {
4122 process_input_packet(ist, NULL, 1);
4123 avcodec_flush_buffers(avctx);
4126 /* duration is the length of the last frame in a stream
4127 * when audio stream is present we don't care about
4128 * last video frame length because it's not defined exactly */
4129 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4133 for (i = 0; i < ifile->nb_streams; i++) {
4134 ist = input_streams[ifile->ist_index + i];
4135 avctx = ist->dec_ctx;
/* last-frame length: exact for audio (from sample count)... */
4138 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4139 AVRational sample_rate = {1, avctx->sample_rate};
4141 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* ...estimated for video from the configured or average frame rate */
4145 if (ist->framerate.num) {
4146 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4147 } else if (ist->st->avg_frame_rate.num) {
4148 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4149 } else duration = 1;
4151 if (!ifile->duration)
4152 ifile->time_base = ist->st->time_base;
4153 /* the total duration of the stream, max_pts - min_pts is
4154 * the duration of the stream without the last frame */
4155 duration += ist->max_pts - ist->min_pts;
4156 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* loop > 0 means a finite loop count that must be decremented */
4160 if (ifile->loop > 0)
4168 * - 0 -- one packet was read and processed
4169 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4170 * this function should be called again
4171 * - AVERROR_EOF -- this function should not be called again
4173 static int process_input(int file_index)
4175 InputFile *ifile = input_files[file_index];
4176 AVFormatContext *is;
4184 ret = get_input_packet(ifile, &pkt);
4186 if (ret == AVERROR(EAGAIN)) {
/* read failure/EOF with -stream_loop active: rewind and read again */
4190 if (ret < 0 && ifile->loop) {
4191 if ((ret = seek_to_start(ifile, is)) < 0)
4193 ret = get_input_packet(ifile, &pkt);
4194 if (ret == AVERROR(EAGAIN)) {
4200 if (ret != AVERROR_EOF) {
4201 print_error(is->filename, ret);
/* EOF on this file: flush its decoders and finish non-lavfi outputs */
4206 for (i = 0; i < ifile->nb_streams; i++) {
4207 ist = input_streams[ifile->ist_index + i];
4208 if (ist->decoding_needed) {
4209 ret = process_input_packet(ist, NULL, 0);
4214 /* mark all outputs that don't go through lavfi as finished */
4215 for (j = 0; j < nb_output_streams; j++) {
4216 OutputStream *ost = output_streams[j];
4218 if (ost->source_index == ifile->ist_index + i &&
4219 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4220 finish_output_stream(ost);
/* report EOF as EAGAIN so the caller keeps driving other inputs */
4224 ifile->eof_reached = 1;
4225 return AVERROR(EAGAIN);
4231 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4232 is->streams[pkt.stream_index]);
4234 /* the following test is needed in case new streams appear
4235 dynamically in stream : we ignore them */
4236 if (pkt.stream_index >= ifile->nb_streams) {
4237 report_new_stream(file_index, &pkt);
4238 goto discard_packet;
4241 ist = input_streams[ifile->ist_index + pkt.stream_index];
4243 ist->data_size += pkt.size;
4247 goto discard_packet;
4249 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4250 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4255 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4256 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4257 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4258 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4259 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4260 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4261 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4262 av_ts2str(input_files[ist->file_index]->ts_offset),
4263 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for containers with < 64-bit timestamps */
4266 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4267 int64_t stime, stime2;
4268 // Correcting starttime based on the enabled streams
4269 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4270 // so we instead do it here as part of discontinuity handling
4271 if ( ist->next_dts == AV_NOPTS_VALUE
4272 && ifile->ts_offset == -is->start_time
4273 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4274 int64_t new_start_time = INT64_MAX;
4275 for (i=0; i<is->nb_streams; i++) {
4276 AVStream *st = is->streams[i];
4277 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4279 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4281 if (new_start_time > is->start_time) {
4282 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4283 ifile->ts_offset = -new_start_time;
4287 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4288 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4289 ist->wrap_correction_done = 1;
/* unwrap dts/pts that are more than half the wrap range past start */
4291 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4292 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4293 ist->wrap_correction_done = 0;
4295 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4296 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4297 ist->wrap_correction_done = 0;
4301 /* add the stream-global side data to the first packet */
4302 if (ist->nb_packets == 1) {
4303 for (i = 0; i < ist->st->nb_side_data; i++) {
4304 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* display matrix is handled via autorotate, don't copy it here */
4307 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
/* don't overwrite side data the packet already carries */
4310 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4313 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4317 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the -itsscale factor */
4321 if (pkt.dts != AV_NOPTS_VALUE)
4322 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4323 if (pkt.pts != AV_NOPTS_VALUE)
4324 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4326 if (pkt.pts != AV_NOPTS_VALUE)
4327 pkt.pts *= ist->ts_scale;
4328 if (pkt.dts != AV_NOPTS_VALUE)
4329 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity: first dts of a stream far away from the
 * last seen timestamp of the whole file (TS_DISCONT formats only) */
4331 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4332 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4333 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4334 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4335 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4336 int64_t delta = pkt_dts - ifile->last_ts;
4337 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4338 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4339 ifile->ts_offset -= delta;
4340 av_log(NULL, AV_LOG_DEBUG,
4341 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4342 delta, ifile->ts_offset);
4343 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4344 if (pkt.pts != AV_NOPTS_VALUE)
4345 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift this iteration's timestamps past the previous one */
4349 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4350 if (pkt.pts != AV_NOPTS_VALUE) {
4351 pkt.pts += duration;
4352 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4353 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4356 if (pkt.dts != AV_NOPTS_VALUE)
4357 pkt.dts += duration;
/* intra-stream discontinuity: dts far from the predicted next_dts */
4359 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4360 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4361 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4362 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4364 int64_t delta = pkt_dts - ist->next_dts;
4365 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4366 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4367 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4368 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4369 ifile->ts_offset -= delta;
4370 av_log(NULL, AV_LOG_DEBUG,
4371 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4372 delta, ifile->ts_offset);
4373 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4374 if (pkt.pts != AV_NOPTS_VALUE)
4375 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-TS_DISCONT formats: drop wildly invalid timestamps instead */
4378 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4379 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4380 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4381 pkt.dts = AV_NOPTS_VALUE;
4383 if (pkt.pts != AV_NOPTS_VALUE){
4384 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4385 delta = pkt_pts - ist->next_dts;
4386 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4387 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4388 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4389 pkt.pts = AV_NOPTS_VALUE;
/* remember the last timestamp for inter-stream discontinuity checks */
4395 if (pkt.dts != AV_NOPTS_VALUE)
4396 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4399 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4400 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4401 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4402 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4403 av_ts2str(input_files[ist->file_index]->ts_offset),
4404 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4407 sub2video_heartbeat(ist, pkt.pts);
4409 process_input_packet(ist, &pkt, 0);
4412 av_packet_unref(&pkt);
4418 * Perform a step of transcoding for the specified filter graph.
4420 * @param[in] graph filter graph to consider
4421 * @param[out] best_ist input stream where a frame would allow to continue
4422 * @return 0 for success, <0 for error
4424 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4427 int nb_requests, nb_requests_max = 0;
4428 InputFilter *ifilter;
4432 ret = avfilter_graph_request_oldest(graph->graph);
/* success: a frame is available somewhere in the graph, drain the sinks */
4434 return reap_filters(0);
/* EOF: flush the sinks and close every output fed by this graph */
4436 if (ret == AVERROR_EOF) {
4437 ret = reap_filters(1);
4438 for (i = 0; i < graph->nb_outputs; i++)
4439 close_output_stream(graph->outputs[i]->ost);
4442 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the input whose buffersrc has failed the most frame
 * requests — feeding it is most likely to make progress */
4445 for (i = 0; i < graph->nb_inputs; i++) {
4446 ifilter = graph->inputs[i];
/* skip inputs whose file is itself stalled or exhausted */
4448 if (input_files[ist->file_index]->eagain ||
4449 input_files[ist->file_index]->eof_reached)
4451 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4452 if (nb_requests > nb_requests_max) {
4453 nb_requests_max = nb_requests;
/* no feedable input found: mark the graph's outputs unavailable */
4459 for (i = 0; i < graph->nb_outputs; i++)
4460 graph->outputs[i]->ost->unavailable = 1;
4466 * Run a single step of transcoding.
4468 * @return 0 for success, <0 for error
4470 static int transcode_step(void)
4473 InputStream *ist = NULL;
4476 ost = choose_output();
4483 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* output's filtergraph not configured yet: configure it once all of its
 * inputs know their format */
4487 if (ost->filter && !ost->filter->graph->graph) {
4488 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4489 ret = configure_filtergraph(ost->filter->graph);
4491 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* configured graph: let the graph decide which input stream to feed */
4497 if (ost->filter && ost->filter->graph->graph) {
4498 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4502 } else if (ost->filter) {
/* still-unconfigured graph: feed an input that produced no output yet */
4504 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4505 InputFilter *ifilter = ost->filter->graph->inputs[i];
4506 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* every input of this graph is exhausted */
4512 ost->inputs_done = 1;
/* no filtergraph: read directly from the mapped source stream */
4516 av_assert0(ost->source_index >= 0);
4517 ist = input_streams[ost->source_index];
4520 ret = process_input(ist->file_index);
4521 if (ret == AVERROR(EAGAIN)) {
4522 if (input_files[ist->file_index]->eagain)
4523 ost->unavailable = 1;
/* EOF of one input is not fatal for the step as a whole */
4528 return ret == AVERROR_EOF ? 0 : ret;
4530 return reap_filters(0);
4534 * The following code is the main loop of the file converter
4536 static int transcode(void)
4539 AVFormatContext *os;
4542 int64_t timer_start;
4543 int64_t total_packets_written = 0;
4545 ret = transcode_init();
4549 if (stdin_interaction) {
4550 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4553 timer_start = av_gettime_relative();
4556 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until a termination
 * signal arrives or no output stream needs more data */
4560 while (!received_sigterm) {
4561 int64_t cur_time= av_gettime_relative();
4563 /* if 'q' pressed, exits */
4564 if (stdin_interaction)
4565 if (check_keyboard_interaction(cur_time) < 0)
4568 /* check if there's any stream where output is still needed */
4569 if (!need_output()) {
4570 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4574 ret = transcode_step();
4575 if (ret < 0 && ret != AVERROR_EOF) {
4577 av_strerror(ret, errbuf, sizeof(errbuf));
4579 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4583 /* dump report by using the output first video and audio streams */
4584 print_report(0, timer_start, cur_time);
4587 free_input_threads();
4590 /* at the end of stream, we must flush the decoder buffers */
4591 for (i = 0; i < nb_input_streams; i++) {
4592 ist = input_streams[i];
4593 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4594 process_input_packet(ist, NULL, 0);
4601 /* write the trailer if needed and close file */
4602 for (i = 0; i < nb_output_files; i++) {
4603 os = output_files[i]->ctx;
/* a file whose header was never written got no packets at all */
4604 if (!output_files[i]->header_written) {
4605 av_log(NULL, AV_LOG_ERROR,
4606 "Nothing was written into output file %d (%s), because "
4607 "at least one of its streams received no packets.\n",
4611 if ((ret = av_write_trailer(os)) < 0) {
4612 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4618 /* dump report by using the first video and audio streams */
4619 print_report(1, timer_start, av_gettime_relative());
4621 /* close each encoder */
4622 for (i = 0; i < nb_output_streams; i++) {
4623 ost = output_streams[i];
4624 if (ost->encoding_needed) {
4625 av_freep(&ost->enc_ctx->stats_in);
4627 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was produced */
4630 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4631 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4635 /* close each decoder */
4636 for (i = 0; i < nb_input_streams; i++) {
4637 ist = input_streams[i];
4638 if (ist->decoding_needed) {
4639 avcodec_close(ist->dec_ctx);
4640 if (ist->hwaccel_uninit)
4641 ist->hwaccel_uninit(ist->dec_ctx);
4645 av_buffer_unref(&hw_device_ctx);
/* cleanup path (reached on both success and failure): free per-output
 * resources that are not handled elsewhere */
4652 free_input_threads();
4655 if (output_streams) {
4656 for (i = 0; i < nb_output_streams; i++) {
4657 ost = output_streams[i];
4660 if (fclose(ost->logfile))
4661 av_log(NULL, AV_LOG_ERROR,
4662 "Error closing logfile, loss of information possible: %s\n",
4663 av_err2str(AVERROR(errno)));
4664 ost->logfile = NULL;
4666 av_freep(&ost->forced_kf_pts);
4667 av_freep(&ost->apad);
4668 av_freep(&ost->disposition);
4669 av_dict_free(&ost->encoder_opts);
4670 av_dict_free(&ost->sws_dict);
4671 av_dict_free(&ost->swr_opts);
4672 av_dict_free(&ost->resample_opts);
/* User CPU time consumed by this process, in microseconds (falls back to
 * wallclock when neither getrusage nor GetProcessTimes is available). */
4680 static int64_t getutime(void)
4683 struct rusage rusage;
4685 getrusage(RUSAGE_SELF, &rusage);
4686 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4687 #elif HAVE_GETPROCESSTIMES
4689 FILETIME c, e, k, u;
4690 proc = GetCurrentProcess();
4691 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100ns units; /10 converts to microseconds */
4692 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4694 return av_gettime_relative();
/* Peak memory usage of this process, in bytes. */
4698 static int64_t getmaxrss(void)
4700 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4701 struct rusage rusage;
4702 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes, hence the *1024 */
4703 return (int64_t)rusage.ru_maxrss * 1024;
4704 #elif HAVE_GETPROCESSMEMORYINFO
4706 PROCESS_MEMORY_COUNTERS memcounters;
4707 proc = GetCurrentProcess();
4708 memcounters.cb = sizeof(memcounters);
4709 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4710 return memcounters.PeakPagefileUsage;
/* No-op av_log callback, installed in debug ("-d") mode to silence normal
 * logging (body not visible in this excerpt; presumably empty — TODO confirm). */
4716 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and the exit handler, register all
 * components, parse the command line, validate the input/output file
 * counts, run the transcode loop, and report benchmark/decode-error stats.
 * NOTE(review): several lines (variable declarations, exit_program() calls
 * inside the error branches, the closing brace) fall outside this excerpt;
 * code below is unchanged, comments only. */
4720 int main(int argc, char **argv)
/* Ensure ffmpeg_cleanup() runs on every exit_program() path. */
4727 register_exit(ffmpeg_cleanup);
4729 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4731 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel before anything else can log. */
4732 parse_loglevel(argc, argv, options);
/* "-d" (debug/daemon mode): suppress normal log output entirely. */
4734 if(argc>1 && !strcmp(argv[1], "-d")){
4736 av_log_set_callback(log_callback_null);
/* Register codecs, devices, filters, and network support before parsing
 * options that may reference them. */
4741 avcodec_register_all();
4743 avdevice_register_all();
4745 avfilter_register_all();
4747 avformat_network_init();
4749 show_banner(argc, argv, options);
4751 /* parse options and open all input/output files */
4752 ret = ffmpeg_parse_options(argc, argv);
/* No work requested at all: print a hint rather than an error. */
4756 if (nb_output_files <= 0 && nb_input_files == 0) {
4758 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4762 /* file converter / grab */
4763 if (nb_output_files <= 0) {
4764 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Input-less invocations are deliberately allowed (e.g. lavfi sources). */
4768 // if (nb_input_files == 0) {
4769 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Presumably disables benchmark/progress interaction for RTP outputs —
 * the loop body is outside this view; TODO confirm. */
4773 for (i = 0; i < nb_output_files; i++) {
4774 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the whole transcode for the -benchmark report (user CPU time). */
4778 current_time = ti = getutime();
4779 if (transcode() < 0)
4781 ti = getutime() - ti;
4783 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4785 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4786 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run when the decode-error ratio exceeds -max_error_rate. */
4787 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 when interrupted by a signal; otherwise the accumulated code. */
4790 exit_program(received_nb_signals ? 255 : main_return_code);
4791 return main_return_code;