2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for the banner / version output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Opened lazily by do_video_stats() when -vstats is requested. */
115 static FILE *vstats_file;
/* Constant names available inside -force_key_frames expressions.
 * NOTE(review): the initializer elements are missing from this extract. */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Run-time statistics and behavior flags shared across the transcode loop. */
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer reused by do_subtitle_out(); freed in ffmpeg_cleanup(). */
142 static uint8_t *subtitle_out;
/* Global tables of streams/files built during option parsing; the int
 * counters track the number of valid entries in each array. */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
/* Saved terminal state so term_exit_sigsafe() can restore the tty. */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas on ist->sub2video.frame for rendering
 * bitmap subtitles into the filter graph ("sub2video" path).
 * Returns 0 on success, <0 on allocation failure.
 * NOTE(review): this extract is missing interior lines (braces, the
 * declaration of `ret`, the error return and final `return 0;`) —
 * compare against upstream ffmpeg.c before editing. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to the configured sub2video size. */
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the whole canvas to fully transparent black. */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted subtitle rectangle into the RGB32 canvas at
 * (r->x, r->y), expanding palette indices through r->data[1].
 * Rectangles that are not bitmaps or that overflow the canvas are
 * skipped with a warning.
 * NOTE(review): this extract is missing interior lines (the AVSubtitleRect
 * parameter in the signature, local decls of x/y/src/src2, early returns
 * and closing braces) — compare against upstream ffmpeg.c before editing. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: reject rectangles that would write outside the canvas. */
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* Palette lookup: each source byte indexes a 32-bit RGBA palette entry. */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render the rectangles of `sub` onto a fresh blank canvas and push the
 * result into the filter graphs; records end_pts so heartbeats know when
 * the subtitle expires. A NULL `sub` clears the canvas.
 * NOTE(review): this extract is missing interior lines (local decls,
 * the `else` branch for sub == NULL, braces and returns) — compare
 * against upstream ffmpeg.c before editing. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* Convert display window from AV_TIME_BASE (start/end offsets in ms)
 * into the input stream's time base. */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
/* Composite every rectangle onto the canvas, then hand it to the filters. */
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame on every subtitle stream of the
 * same input file, so filters waiting on a subtitle input do not stall
 * the video path indefinitely.
 * NOTE(review): this extract is missing interior lines (decls of i, j,
 * pts2, nb_reqs, `continue` statements and closing braces) — compare
 * against upstream ffmpeg.c before editing. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Refresh the canvas when the previous subtitle has expired (or no
 * canvas exists yet) before repeating the push. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* Only push if at least one buffersrc actually failed a request. */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put the tty back into its saved
 * state and emit an empty QUIET log (flushes the log line state).
 * NOTE(review): the original guards (HAVE_TERMIOS_H / restore_tty check)
 * are missing from this extract — compare against upstream ffmpeg.c. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handler and the main
 * loop. The sig_atomic-style `volatile int`s are written from handlers;
 * transcode_init_done is a proper C11 atomic read by decode_interrupt_cb. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: records which signal arrived and how many signals we
 * have seen; after more than 3 signals, hard-exits via write(2) + exit.
 * Uses only async-signal-safe calls (write/strlen on a literal).
 * NOTE(review): the return-type line, term_exit_sigsafe() call and the
 * exit_program/exit call are missing from this extract. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the POSIX
 * signal path (CTRL+C/BREAK -> SIGINT, close/logoff/shutdown -> SIGTERM),
 * then stalls on the terminating events so the main thread can clean up.
 * NOTE(review): the switch statement header, CTRL_C_EVENT case, sleeps,
 * return statements and closing braces are missing from this extract. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
/* Unknown event: log it and fall through to the default return. */
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this appears to be the body of term_init() — the function
 * header and several interior lines (saving oldtty, VMIN/VTIME setup,
 * restore_tty/atexit registration, remaining signal() calls and #if
 * guards) are missing from this extract. It switches the tty to raw-ish
 * mode for single-key reads and installs the termination handlers. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation, output post-processing stays on (OPOST),
 * echo and canonical (line-buffered) mode are turned off. */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* Returns the next pending keypress or -1 when none is available.
 * Unix: zero-timeout select() on stdin; Windows: PeekNamedPipe when stdin
 * is a pipe, console API otherwise.
 * NOTE(review): most interior lines (fd_set/timeval setup, the actual
 * read(), the Windows console branch and returns) are missing from this
 * extract — compare against upstream ffmpeg.c. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so failure implies stdin is a pipe. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown registered for program exit: frees filtergraphs (and
 * any frames/subtitles still queued on their inputs), output files and
 * streams, input files and streams, closes the vstats file, and prints
 * the final status line. `ret` is the process exit status being used.
 * NOTE(review): this extract is missing many interior lines (decls of
 * i/j/frame/sub/pkt/s, several if-guards, closing braces, the benchmark
 * guard, avio_closep call, and the final ffmpeg_exited store) — compare
 * against upstream ffmpeg.c before editing. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: drain queued frames/subtitles, then free graph --- */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* --- output files: close the muxer context and its options dict --- */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, frames, encoder state, queues --- */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
/* Unref any packets still waiting in the pre-header muxing queue. */
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
/* --- input files and streams --- */
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean vstats data was lost. */
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
/* Final status: a signal exit is "normal"; a nonzero ret after init
 * started means the conversion itself failed. */
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in `m` was left unconsumed
 * (i.e. not recognized by the component it was given to).
 * NOTE(review): the exit call and closing braces are missing from this
 * extract — compare against upstream ffmpeg.c before editing. */
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time elapsed since the previous call,
 * labelled by the printf-style message; a NULL fmt only resets the timer.
 * NOTE(review): interior lines (buf/va decls, va_start/va_end, the NULL
 * fmt check, current_time update and braces) are missing from this
 * extract — compare against upstream ffmpeg.c before editing. */
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* Deliver one encoded packet to the muxer: enforces -frames limits,
 * buffers packets until the muxer header is written, fixes up timestamps
 * (CFR duration override, ts rescale, DTS/PTS sanity and monotonicity),
 * then interleave-writes and unrefs the packet. `unqueue` marks packets
 * replayed from the muxing queue so they are not double-counted.
 * NOTE(review): this extract is missing interior lines (decls of ret/i,
 * frame_number increment, several early returns/gotos, exit_on_error
 * handling, debug_ts guard lines and closing braces) — compare against
 * upstream ffmpeg.c before editing. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically, capped at max_muxing_queue_size. */
696 if (!av_fifo_space(ost->muxing_queue)) {
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
/* VSYNC_DROP video / negative async audio: strip timestamps entirely. */
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
/* Side data layout: u32 quality, byte 4 pict_type, u64 errors from +8. */
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force each packet's duration to exactly one frame. */
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
/* Replace dts with the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream;
 * VP9 stream copy is exempt (superframes share timestamps). */
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonous DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
774 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* On write failure: report, remember the error for the exit code, and
 * finish all streams so the transcode loop unwinds. */
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
/* Mark this output stream's encoder as finished; with -shortest, also
 * clamp the file's recording_time to this stream's current end time.
 * NOTE(review): the `if (of->shortest)` guard line and closing braces
 * appear to be missing from this extract — compare against upstream. */
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the stream's bitstream-filter chain (if any) and
 * hand each produced packet to write_packet(); without filters the packet
 * goes straight to the muxer.
 * NOTE(review): this extract is missing interior lines (ret/idx decls,
 * the EAGAIN idx-- / loop-control logic, EOF handling, closing braces
 * and the exit_program on error) — compare against upstream ffmpeg.c. */
817 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
821 /* apply the output bitstream filters, if any */
822 if (ost->nb_bitstream_filters) {
825 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
831 /* get a packet from the previous filter up the chain */
832 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
833 if (ret == AVERROR(EAGAIN)) {
840 /* send it to the next filter down the chain or to the muxer */
841 if (idx < ost->nb_bitstream_filters) {
842 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
847 write_packet(of, pkt, ost, 0);
/* No bitstream filters configured: write the packet directly. */
850 write_packet(of, pkt, ost, 0);
853 if (ret < 0 && ret != AVERROR_EOF) {
854 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
855 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Returns 0 (and closes the stream) when the stream has reached the
 * output file's -t recording_time limit, 1 while encoding may continue.
 * NOTE(review): the `return 0;` / `return 1;` lines and closing braces
 * are missing from this extract — compare against upstream ffmpeg.c. */
861 static int check_recording_time(OutputStream *ost)
863 OutputFile *of = output_files[ost->file_index];
865 if (of->recording_time != INT64_MAX &&
866 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
867 AV_TIME_BASE_Q) >= 0) {
868 close_output_stream(ost);
/* Encode one audio frame: stamps/advances sync_opts by nb_samples, sends
 * the frame to the encoder and drains all resulting packets through
 * output_packet() after rescaling to the mux timebase.
 * NOTE(review): this extract is missing interior lines (the AVFrame
 * parameter in the signature, pkt/ret decls, goto error paths, the
 * receive loop header, debug_ts guards and closing braces) — compare
 * against upstream ffmpeg.c before editing. */
874 static void do_audio_out(OutputFile *of, OutputStream *ost,
877 AVCodecContext *enc = ost->enc_ctx;
881 av_init_packet(&pkt);
885 if (!check_recording_time(ost))
/* Without a usable pts (or with negative async), continue the running
 * sample count so output timestamps stay contiguous. */
888 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
889 frame->pts = ost->sync_opts;
890 ost->sync_opts = frame->pts + frame->nb_samples;
891 ost->samples_encoded += frame->nb_samples;
892 ost->frames_encoded++;
894 av_assert0(pkt.size || !pkt.data);
895 update_benchmark(NULL);
897 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
898 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
899 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
900 enc->time_base.num, enc->time_base.den);
903 ret = avcodec_send_frame(enc, frame);
/* Drain every packet the encoder has ready; EAGAIN means "send more". */
908 ret = avcodec_receive_packet(enc, &pkt);
909 if (ret == AVERROR(EAGAIN))
914 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
916 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
919 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
920 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
921 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
922 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
925 output_packet(of, &pkt, ost);
930 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle: allocates the shared subtitle_out buffer on first
 * use, normalizes display times (start forced to 0), encodes once for
 * most codecs — twice for DVB subtitles (draw packet + clear packet) —
 * and emits each result via output_packet().
 * NOTE(review): this extract is missing interior lines (the OutputStream
 * and AVSubtitle parameters, enc/pkt decls, exit paths, the `nb = 2` /
 * `nb = 1` assignments, the i==0/i>0 pts-correction branches and closing
 * braces) — compare against upstream ffmpeg.c before editing. */
934 static void do_subtitle_out(OutputFile *of,
938 int subtitle_out_max_size = 1024 * 1024;
939 int subtitle_out_size, nb, i;
944 if (sub->pts == AV_NOPTS_VALUE) {
945 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
954 subtitle_out = av_malloc(subtitle_out_max_size);
956 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
961 /* Note: DVB subtitle need one packet to draw them and one other
962 packet to clear them */
963 /* XXX: signal it in the codec context ? */
964 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
969 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
971 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
972 pts -= output_files[ost->file_index]->start_time;
973 for (i = 0; i < nb; i++) {
974 unsigned save_num_rects = sub->num_rects;
976 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
977 if (!check_recording_time(ost))
981 // start_display_time is required to be 0
982 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
983 sub->end_display_time -= sub->start_display_time;
984 sub->start_display_time = 0;
988 ost->frames_encoded++;
990 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
991 subtitle_out_max_size, sub);
/* The encoder may zero num_rects for the DVB "clear" pass; restore it
 * so the second iteration still sees the original rectangles. */
993 sub->num_rects = save_num_rects;
994 if (subtitle_out_size < 0) {
995 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
999 av_init_packet(&pkt);
1000 pkt.data = subtitle_out;
1001 pkt.size = subtitle_out_size;
1002 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1003 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1004 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1005 /* XXX: the pts correction is handled here. Maybe handling
1006 it in the codec would be better */
1008 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1010 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1013 output_packet(of, &pkt, ost);
/* Encode one video frame, implementing frame-rate conversion: based on
 * the configured vsync mode it may drop the frame, emit it once, or
 * duplicate the previous/current frame several times (nb_frames /
 * nb0_frames), then encodes each picture, handles forced keyframes, and
 * emits packets via output_packet(). A NULL next_picture flushes by
 * predicting duplication from recent history.
 * NOTE(review): this extract is heavily fragmented — the pkt/frame_size/
 * sync_ipts locals, several branch bodies, VSYNC case labels, error
 * paths and closing braces are missing. Treat every numbered line below
 * as an excerpt and compare against upstream ffmpeg.c before editing. */
1017 static void do_video_out(OutputFile *of,
1019 AVFrame *next_picture,
1022 int ret, format_video_sync;
1024 AVCodecContext *enc = ost->enc_ctx;
1025 AVCodecParameters *mux_par = ost->st->codecpar;
1026 AVRational frame_rate;
1027 int nb_frames, nb0_frames, i;
1028 double delta, delta0;
1029 double duration = 0;
1031 InputStream *ist = NULL;
1032 AVFilterContext *filter = ost->filter->filter;
1034 if (ost->source_index >= 0)
1035 ist = input_streams[ost->source_index];
/* Derive the nominal per-frame duration (in encoder time-base units)
 * from the filter's frame rate, then refine with stream/packet info. */
1037 frame_rate = av_buffersink_get_frame_rate(filter);
1038 if (frame_rate.num > 0 && frame_rate.den > 0)
1039 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1041 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1042 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1044 if (!ost->filters_script &&
1048 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1049 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush: estimate how many trailing duplicates to emit from the median
 * of the last three nb0 values. */
1052 if (!next_picture) {
1054 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1055 ost->last_nb0_frames[1],
1056 ost->last_nb0_frames[2]);
1058 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1059 delta = delta0 + duration;
1061 /* by default, we output a single frame */
1062 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO to a concrete mode based on the muxer's flags. */
1065 format_video_sync = video_sync_method;
1066 if (format_video_sync == VSYNC_AUTO) {
1067 if(!strcmp(of->ctx->oformat->name, "avi")) {
1068 format_video_sync = VSYNC_VFR;
1070 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1072 && format_video_sync == VSYNC_CFR
1073 && input_files[ist->file_index]->ctx->nb_streams == 1
1074 && input_files[ist->file_index]->input_ts_offset == 0) {
1075 format_video_sync = VSYNC_VSCFR;
1077 if (format_video_sync == VSYNC_CFR && copy_ts) {
1078 format_video_sync = VSYNC_VSCFR;
1081 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Large negative drift: snap the input timestamp back onto the output
 * grid instead of dropping/duplicating around it. */
1085 format_video_sync != VSYNC_PASSTHROUGH &&
1086 format_video_sync != VSYNC_DROP) {
1087 if (delta0 < -0.6) {
1088 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1090 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1091 sync_ipts = ost->sync_opts;
1096 switch (format_video_sync) {
1098 if (ost->frame_number == 0 && delta0 >= 0.5) {
1099 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1102 ost->sync_opts = lrint(sync_ipts);
1105 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR-style decision: drop when under threshold, duplicate when the
 * drift exceeds ~1.1 frame durations. */
1106 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1108 } else if (delta < -1.1)
1110 else if (delta > 1.1) {
1111 nb_frames = lrintf(delta);
1113 nb0_frames = lrintf(delta0 - 0.6);
1119 else if (delta > 0.6)
1120 ost->sync_opts = lrint(sync_ipts);
1123 case VSYNC_PASSTHROUGH:
1124 ost->sync_opts = lrint(sync_ipts);
/* Clamp to the -frames limit and record nb0 history for flush. */
1131 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1132 nb0_frames = FFMIN(nb0_frames, nb_frames);
1134 memmove(ost->last_nb0_frames + 1,
1135 ost->last_nb0_frames,
1136 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1137 ost->last_nb0_frames[0] = nb0_frames;
1139 if (nb0_frames == 0 && ost->last_dropped) {
1141 av_log(NULL, AV_LOG_VERBOSE,
1142 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1143 ost->frame_number, ost->st->index, ost->last_frame->pts);
1145 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1146 if (nb_frames > dts_error_threshold * 30) {
1147 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1151 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1152 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1153 if (nb_frames_dup > dup_warning) {
1154 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1158 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1160 /* duplicates frame if needed */
1161 for (i = 0; i < nb_frames; i++) {
1162 AVFrame *in_picture;
1163 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1167 if (i < nb0_frames && ost->last_frame) {
1168 in_picture = ost->last_frame;
1170 in_picture = next_picture;
1175 in_picture->pts = ost->sync_opts;
1178 if (!check_recording_time(ost))
1180 if (ost->frame_number >= ost->max_frames)
1184 #if FF_API_LAVF_FMT_RAWPICTURE
1185 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1186 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1187 /* raw pictures are written as AVPicture structure to
1188 avoid any copies. We support temporarily the older
1190 if (in_picture->interlaced_frame)
1191 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1193 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1194 pkt.data = (uint8_t *)in_picture;
1195 pkt.size = sizeof(AVPicture);
1196 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1197 pkt.flags |= AV_PKT_FLAG_KEY;
1199 output_packet(of, &pkt, ost);
1203 int forced_keyframe = 0;
1206 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1207 ost->top_field_first >= 0)
1208 in_picture->top_field_first = !!ost->top_field_first;
1210 if (in_picture->interlaced_frame) {
1211 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1212 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1214 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1216 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1218 in_picture->quality = enc->global_quality;
1219 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, a user expression, or "source". */
1221 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1222 in_picture->pts * av_q2d(enc->time_base) : NAN;
1223 if (ost->forced_kf_index < ost->forced_kf_count &&
1224 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1225 ost->forced_kf_index++;
1226 forced_keyframe = 1;
1227 } else if (ost->forced_keyframes_pexpr) {
1229 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1230 res = av_expr_eval(ost->forced_keyframes_pexpr,
1231 ost->forced_keyframes_expr_const_values, NULL);
1232 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1233 ost->forced_keyframes_expr_const_values[FKF_N],
1234 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1235 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1236 ost->forced_keyframes_expr_const_values[FKF_T],
1237 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1240 forced_keyframe = 1;
1241 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1242 ost->forced_keyframes_expr_const_values[FKF_N];
1243 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1244 ost->forced_keyframes_expr_const_values[FKF_T];
1245 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1248 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1249 } else if ( ost->forced_keyframes
1250 && !strncmp(ost->forced_keyframes, "source", 6)
1251 && in_picture->key_frame==1) {
1252 forced_keyframe = 1;
1255 if (forced_keyframe) {
1256 in_picture->pict_type = AV_PICTURE_TYPE_I;
1257 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1260 update_benchmark(NULL);
1262 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1263 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1264 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1265 enc->time_base.num, enc->time_base.den);
1268 ost->frames_encoded++;
1270 ret = avcodec_send_frame(enc, in_picture);
/* Drain all packets the encoder produced for this input frame. */
1275 ret = avcodec_receive_packet(enc, &pkt);
1276 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1277 if (ret == AVERROR(EAGAIN))
1283 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1284 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1285 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1286 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1289 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1290 pkt.pts = ost->sync_opts;
1292 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1295 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1296 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1297 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1298 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1301 frame_size = pkt.size;
1302 output_packet(of, &pkt, ost);
1304 /* if two pass, output log */
1305 if (ost->logfile && enc->stats_out) {
1306 fprintf(ost->logfile, "%s", enc->stats_out);
1312 * For video, number of frames in == number of packets out.
1313 * But there may be reordering, so we can't throw away frames on encoder
1314 * flush, we need to limit them here, before they go into encoder.
1316 ost->frame_number++;
1318 if (vstats_filename && frame_size)
1319 do_video_stats(ost, frame_size);
/* Keep a reference to the frame just encoded so nb0 duplication can
 * re-encode it next call; dropped unconditionally on flush. */
1322 if (!ost->last_frame)
1323 ost->last_frame = av_frame_alloc();
1324 av_frame_unref(ost->last_frame);
1325 if (next_picture && ost->last_frame)
1326 av_frame_ref(ost->last_frame, next_picture);
1328 av_frame_free(&ost->last_frame);
1332 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1336 static double psnr(double d)
1338 return -10.0 * log10(d);
/* Append one line of per-frame statistics for this video output stream to the
 * file selected with -vstats / -vstats_file.
 * NOTE(review): several original lines are elided from this chunk (e.g. the
 * fopen() failure check, the assignment of 'enc', closing braces); comments
 * below describe only the visible statements. */
1341 static void do_video_stats(OutputStream *ost, int frame_size)
1343 AVCodecContext *enc;
1345 double ti1, bitrate, avg_bitrate;
1347 /* this is executed just the first time do_video_stats is called */
1349 vstats_file = fopen(vstats_filename, "w");
1357 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1358 frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy (<=1) and the extended line format */
1359 if (vstats_version <= 1) {
1360 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1361 ost->quality / (float)FF_QP2LAMBDA);
1363 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1364 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only printed when the encoder collected error stats (AV_CODEC_FLAG_PSNR) */
1367 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1368 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1370 fprintf(vstats_file,"f_size= %6d ", frame_size);
1371 /* compute pts value */
1372 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame; average from the bytes written so far */
1376 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1377 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1378 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1379 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1380 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1384 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/* Mark an output stream as completely finished (both encoder and muxer side).
 * NOTE(review): lines are elided here — the loop below marks every stream of
 * the owning output file as finished; in the full source this is presumably
 * gated by a condition (shortest-stream handling) on a missing line; confirm
 * against the original. */
1386 static void finish_output_stream(OutputStream *ost)
1388 OutputFile *of = output_files[ost->file_index];
1391 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1394 for (i = 0; i < of->ctx->nb_streams; i++)
1395 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1400 * Get and encode new output from any of the filtergraphs, without causing
1403 * @return 0 for success, <0 for severe errors
/*
 * @param flush if non-zero, also drain the sinks: on EOF from a video sink a
 *              NULL frame is pushed to do_video_out() to flush the encoder.
 * NOTE(review): interior lines of this function are elided in this chunk
 * (declarations of 'i'/'ret', 'continue'/'break' statements, closing braces);
 * comments below describe only the visible statements.
 */
1405 static int reap_filters(int flush)
1407 AVFrame *filtered_frame = NULL;
1410 /* Reap all buffers present in the buffer sinks */
1411 for (i = 0; i < nb_output_streams; i++) {
1412 OutputStream *ost = output_streams[i];
1413 OutputFile *of = output_files[ost->file_index];
1414 AVFilterContext *filter;
1415 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph has not been configured yet */
1418 if (!ost->filter || !ost->filter->graph->graph)
1420 filter = ost->filter->filter;
/* lazily initialize the output stream the first time it produces output */
1422 if (!ost->initialized) {
1423 char error[1024] = "";
1424 ret = init_output_stream(ost, error, sizeof(error));
1426 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1427 ost->file_index, ost->index, error);
/* reuse a per-stream frame to avoid per-iteration allocation */
1432 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1433 return AVERROR(ENOMEM);
1435 filtered_frame = ost->filtered_frame;
1438 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1439 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1440 AV_BUFFERSINK_FLAG_NO_REQUEST);
1442 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1443 av_log(NULL, AV_LOG_WARNING,
1444 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1445 } else if (flush && ret == AVERROR_EOF) {
/* on flush, EOF from a video sink is forwarded as a NULL frame to drain the encoder */
1446 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1447 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1451 if (ost->finished) {
1452 av_frame_unref(filtered_frame);
/* rescale the frame pts from the sink time base to the encoder time base,
 * keeping a higher-precision floating point copy for the fps/vsync code */
1455 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1456 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1457 AVRational filter_tb = av_buffersink_get_time_base(filter);
1458 AVRational tb = enc->time_base;
/* widen the denominator (up to 16 extra bits) so the intermediate rescale keeps sub-tick precision */
1459 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1461 tb.den <<= extra_bits;
1463 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1464 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1465 float_pts /= 1 << extra_bits;
1466 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1467 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1469 filtered_frame->pts =
1470 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1471 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1473 //if (ost->source_index >= 0)
1474 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1476 switch (av_buffersink_get_type(filter)) {
1477 case AVMEDIA_TYPE_VIDEO:
1478 if (!ost->frame_aspect_ratio.num)
1479 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1482 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1483 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1485 enc->time_base.num, enc->time_base.den);
1488 do_video_out(of, ost, filtered_frame, float_pts);
1490 case AVMEDIA_TYPE_AUDIO:
/* the filtergraph must normalize channel count unless the encoder supports
 * mid-stream parameter changes */
1491 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1492 enc->channels != filtered_frame->channels) {
1493 av_log(NULL, AV_LOG_ERROR,
1494 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1497 do_audio_out(of, ost, filtered_frame);
1500 // TODO support subtitle filters
1504 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: total bytes per media type, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet and
 * frame counters.
 * NOTE(review): some lines (closing braces, 'continue' statements) are elided
 * in this chunk; comments describe only the visible statements. */
1511 static void print_final_stats(int64_t total_size)
1513 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1514 uint64_t subtitle_size = 0;
1515 uint64_t data_size = 0;
/* -1 means "overhead unknown" and prints as "unknown" below */
1516 float percent = -1.0;
1520 for (i = 0; i < nb_output_streams; i++) {
1521 OutputStream *ost = output_streams[i];
1522 switch (ost->enc_ctx->codec_type) {
1523 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1524 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1525 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1526 default: other_size += ost->data_size; break;
1528 extra_size += ost->enc_ctx->extradata_size;
1529 data_size += ost->data_size;
/* pass-1 only streams write a log file, not the output — presumably excluded
 * from the overhead accounting on an elided line; confirm against original */
1530 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1531 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw coded payload */
1535 if (data_size && total_size>0 && total_size >= data_size)
1536 percent = 100.0 * (total_size - data_size) / data_size;
1538 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1539 video_size / 1024.0,
1540 audio_size / 1024.0,
1541 subtitle_size / 1024.0,
1542 other_size / 1024.0,
1543 extra_size / 1024.0);
1545 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1547 av_log(NULL, AV_LOG_INFO, "unknown");
1548 av_log(NULL, AV_LOG_INFO, "\n");
1550 /* print verbose per-stream stats */
1551 for (i = 0; i < nb_input_files; i++) {
1552 InputFile *f = input_files[i];
1553 uint64_t total_packets = 0, total_size = 0;
1555 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1556 i, f->ctx->filename);
1558 for (j = 0; j < f->nb_streams; j++) {
1559 InputStream *ist = input_streams[f->ist_index + j];
1560 enum AVMediaType type = ist->dec_ctx->codec_type;
1562 total_size += ist->data_size;
1563 total_packets += ist->nb_packets;
1565 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1566 i, j, media_type_string(type));
1567 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1568 ist->nb_packets, ist->data_size);
1570 if (ist->decoding_needed) {
1571 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1572 ist->frames_decoded);
1573 if (type == AVMEDIA_TYPE_AUDIO)
1574 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1575 av_log(NULL, AV_LOG_VERBOSE, "; ");
1578 av_log(NULL, AV_LOG_VERBOSE, "\n");
1581 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1582 total_packets, total_size);
/* mirror of the input loop above, for output files/streams */
1585 for (i = 0; i < nb_output_files; i++) {
1586 OutputFile *of = output_files[i];
1587 uint64_t total_packets = 0, total_size = 0;
1589 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1590 i, of->ctx->filename);
1592 for (j = 0; j < of->ctx->nb_streams; j++) {
1593 OutputStream *ost = output_streams[of->ost_index + j];
1594 enum AVMediaType type = ost->enc_ctx->codec_type;
1596 total_size += ost->data_size;
1597 total_packets += ost->packets_written;
1599 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1600 i, j, media_type_string(type));
1601 if (ost->encoding_needed) {
1602 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1603 ost->frames_encoded);
1604 if (type == AVMEDIA_TYPE_AUDIO)
1605 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1606 av_log(NULL, AV_LOG_VERBOSE, "; ");
1609 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1610 ost->packets_written, ost->data_size);
1612 av_log(NULL, AV_LOG_VERBOSE, "\n");
1615 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1616 total_packets, total_size);
/* warn when nothing at all was written */
1618 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1619 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1621 av_log(NULL, AV_LOG_WARNING, "\n");
1623 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Build and emit the periodic status line (frame/fps/q/size/time/bitrate/speed)
 * to stderr and, when -progress is used, a machine-readable key=value report
 * to progress_avio. Rate-limited to one update per 500ms unless this is the
 * final report.
 * NOTE(review): many lines (declarations of buf/ost/t/total_size, 'return'
 * statements, closing braces) are elided in this chunk; comments describe
 * only the visible statements. */
1628 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1631 AVBPrint buf_script;
1633 AVFormatContext *oc;
1635 AVCodecContext *enc;
1636 int frame_number, vid, i;
1639 int64_t pts = INT64_MIN + 1;
/* static: state persists across calls for rate limiting / QP histogram */
1640 static int64_t last_time = -1;
1641 static int qp_histogram[52];
1642 int hours, mins, secs, us;
/* nothing to do if no consumer of the report */
1646 if (!print_stats && !is_last_report && !progress_avio)
1649 if (!is_last_report) {
1650 if (last_time == -1) {
1651 last_time = cur_time;
/* throttle: at most one report per 500ms */
1654 if ((cur_time - last_time) < 500000)
1656 last_time = cur_time;
/* wall-clock seconds since transcoding started */
1659 t = (cur_time-timer_start) / 1000000.0;
1662 oc = output_files[0]->ctx;
1664 total_size = avio_size(oc->pb);
1665 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1666 total_size = avio_tell(oc->pb);
1670 av_bprint_init(&buf_script, 0, 1);
1671 for (i = 0; i < nb_output_streams; i++) {
1673 ost = output_streams[i];
1675 if (!ost->stream_copy)
1676 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream was already reported; only q is
 * appended for subsequent video streams */
1678 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1679 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1680 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1681 ost->file_index, ost->index, q);
1683 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1686 frame_number = ost->frame_number;
1687 fps = t > 1 ? frame_number / t : 0;
1688 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1689 frame_number, fps < 9.95, fps, q);
1690 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1691 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1692 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1693 ost->file_index, ost->index, q);
/* "L" marks the last report */
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: print a log2-scaled histogram of quantizer values */
1699 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1701 for (j = 0; j < 32; j++)
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1705 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1707 double error, error_sum = 0;
1708 double scale, scale_sum = 0;
1710 char type[3] = { 'Y','U','V' };
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1712 for (j = 0; j < 3; j++) {
/* final report: accumulated error over all frames; otherwise per-frame error */
1713 if (is_last_report) {
1714 error = enc->error[j];
1715 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1717 error = ost->error[j];
1718 scale = enc->width * enc->height * 255.0 * 255.0;
1724 p = psnr(error / scale);
1725 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1726 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1727 ost->file_index, ost->index, type[j] | 32, p);
1729 p = psnr(error_sum / scale_sum);
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1731 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1732 ost->file_index, ost->index, p);
1736 /* compute min output value */
1737 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1738 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1739 ost->st->time_base, AV_TIME_BASE_Q));
1741 nb_frames_drop += ost->last_dropped;
1744 secs = FFABS(pts) / AV_TIME_BASE;
1745 us = FFABS(pts) % AV_TIME_BASE;
/* -1 signals N/A for bitrate/speed below */
1751 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1752 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1754 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1756 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1757 "size=%8.0fkB time=", total_size / 1024.0);
1759 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1761 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1762 (100 * us) / AV_TIME_BASE);
1765 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1766 av_bprintf(&buf_script, "bitrate=N/A\n");
1768 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1769 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1772 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1773 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1774 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1775 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1776 hours, mins, secs, us);
1778 if (nb_frames_dup || nb_frames_drop)
1779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1780 nb_frames_dup, nb_frames_drop);
1781 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1782 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1785 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1786 av_bprintf(&buf_script, "speed=N/A\n");
1788 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1789 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1792 if (print_stats || is_last_report) {
/* '\r' keeps interim reports on one terminal line; '\n' finalizes */
1793 const char end = is_last_report ? '\n' : '\r';
1794 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1795 fprintf(stderr, "%s %c", buf, end);
1797 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1802 if (progress_avio) {
1803 av_bprintf(&buf_script, "progress=%s\n",
1804 is_last_report ? "end" : "continue");
1805 avio_write(progress_avio, buf_script.str,
1806 FFMIN(buf_script.len, buf_script.size - 1));
1807 avio_flush(progress_avio);
1808 av_bprint_finalize(&buf_script, NULL);
1809 if (is_last_report) {
1810 if ((ret = avio_closep(&progress_avio)) < 0)
1811 av_log(NULL, AV_LOG_ERROR,
1812 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1817 print_final_stats(total_size);
/* Drain every encoder at end of stream: send a NULL frame and mux all packets
 * the encoder still has buffered. Streams that never received data get their
 * filtergraph and output stream initialized here first so the file still gets
 * valid (if empty) streams.
 * NOTE(review): many lines (loop exits, 'continue' statements, exit_program
 * calls, closing braces) are elided in this chunk; comments describe only the
 * visible statements. */
1820 static void flush_encoders(void)
1824 for (i = 0; i < nb_output_streams; i++) {
1825 OutputStream *ost = output_streams[i];
1826 AVCodecContext *enc = ost->enc_ctx;
1827 OutputFile *of = output_files[ost->file_index];
/* streamcopy outputs have no encoder to flush */
1829 if (!ost->encoding_needed)
1832 // Try to enable encoding with no input frames.
1833 // Maybe we should just let encoding fail instead.
1834 if (!ost->initialized) {
1835 FilterGraph *fg = ost->filter->graph;
1836 char error[1024] = "";
1838 av_log(NULL, AV_LOG_WARNING,
1839 "Finishing stream %d:%d without any data written to it.\n",
1840 ost->file_index, ost->st->index);
1842 if (ost->filter && !fg->graph) {
1844 for (x = 0; x < fg->nb_inputs; x++) {
1845 InputFilter *ifilter = fg->inputs[x];
1846 if (ifilter->format < 0) {
1847 AVCodecParameters *par = ifilter->ist->st->codecpar;
1848 // We never got any input. Set a fake format, which will
1849 // come from libavformat.
1850 ifilter->format = par->format;
1851 ifilter->sample_rate = par->sample_rate;
1852 ifilter->channels = par->channels;
1853 ifilter->channel_layout = par->channel_layout;
1854 ifilter->width = par->width;
1855 ifilter->height = par->height;
1856 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1860 if (!ifilter_has_all_input_formats(fg))
1863 ret = configure_filtergraph(fg);
1865 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1869 finish_output_stream(ost);
1872 ret = init_output_stream(ost, error, sizeof(error));
1874 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1875 ost->file_index, ost->index, error);
/* PCM-style audio encoders (frame_size <= 1) buffer nothing — no flush needed */
1880 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1882 #if FF_API_LAVF_FMT_RAWPICTURE
1883 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1887 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1891 const char *desc = NULL;
1895 switch (enc->codec_type) {
1896 case AVMEDIA_TYPE_AUDIO:
1899 case AVMEDIA_TYPE_VIDEO:
1906 av_init_packet(&pkt);
1910 update_benchmark(NULL);
/* send_frame(NULL) enters draining mode; keep receiving until EOF */
1912 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1913 ret = avcodec_send_frame(enc, NULL);
1915 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1922 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1923 if (ret < 0 && ret != AVERROR_EOF) {
1924 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append encoder statistics to the pass log */
1929 if (ost->logfile && enc->stats_out) {
1930 fprintf(ost->logfile, "%s", enc->stats_out);
1932 if (ret == AVERROR_EOF) {
1935 if (ost->finished & MUXER_FINISHED) {
1936 av_packet_unref(&pkt);
1939 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1940 pkt_size = pkt.size;
1941 output_packet(of, &pkt, ost);
1942 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1943 do_video_stats(ost, pkt_size);
1950 * Check whether a packet from ist should be written into ost at this time
/* NOTE(review): the return statements and part of the checks are elided in
 * this chunk; the visible conditions reject packets from a non-matching
 * source stream and packets before the output file's start time. */
1952 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1954 OutputFile *of = output_files[ost->file_index];
1955 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1957 if (ost->source_index != ist_index)
1963 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies -ss/-t trimming, rescales timestamps into the muxer time base and
 * forwards the packet to output_packet().
 * NOTE(review): several lines ('return' statements for the skip paths, the
 * opkt declaration, closing braces, else keywords) are elided in this chunk;
 * comments describe only the visible statements. */
1969 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1971 OutputFile *of = output_files[ost->file_index];
1972 InputFile *f = input_files [ist->file_index];
1973 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1974 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1978 av_init_packet(&opkt);
/* by default drop leading non-keyframes so the output starts decodable */
1980 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1981 !ost->copy_initial_nonkeyframes)
/* drop packets before the requested start, unless -copypriorss */
1984 if (!ost->frame_number && !ost->copy_prior_start) {
1985 int64_t comp_start = start_time;
1986 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1987 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1988 if (pkt->pts == AV_NOPTS_VALUE ?
1989 ist->pts < comp_start :
1990 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream when the output file's -t limit is reached */
1994 if (of->recording_time != INT64_MAX &&
1995 ist->pts >= of->recording_time + start_time) {
1996 close_output_stream(ost);
/* likewise for a per-input-file recording limit */
2000 if (f->recording_time != INT64_MAX) {
2001 start_time = f->ctx->start_time;
2002 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2003 start_time += f->start_time;
2004 if (ist->pts >= f->recording_time + start_time) {
2005 close_output_stream(ost);
2010 /* force the input stream PTS */
2011 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts from the input stream time base to the muxer time base,
 * shifting by the output start time */
2014 if (pkt->pts != AV_NOPTS_VALUE)
2015 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2017 opkt.pts = AV_NOPTS_VALUE;
2019 if (pkt->dts == AV_NOPTS_VALUE)
2020 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2022 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2023 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the packet duration */
2025 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2026 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2028 duration = ist->dec_ctx->frame_size;
2029 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2030 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2031 ost->mux_timebase) - ost_tb_start_time;
2034 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2036 opkt.flags = pkt->flags;
2037 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2038 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2039 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2040 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2041 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2043 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2044 &opkt.data, &opkt.size,
2045 pkt->data, pkt->size,
2046 pkt->flags & AV_PKT_FLAG_KEY);
2048 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-produced data in a refcounted buffer so the muxer owns it */
2053 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2058 opkt.data = pkt->data;
2059 opkt.size = pkt->size;
2061 av_copy_packet_side_data(&opkt, pkt);
2063 #if FF_API_LAVF_FMT_RAWPICTURE
2064 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2065 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2066 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2067 /* store AVPicture in AVPacket, as expected by the output format */
2068 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2070 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2074 opkt.data = (uint8_t *)&pict;
2075 opkt.size = sizeof(AVPicture);
2076 opkt.flags |= AV_PKT_FLAG_KEY;
2080 output_packet(of, &opkt, ost);
/* If the decoder did not report a channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn about the guess.
 * NOTE(review): the return statements and closing braces are elided in this
 * chunk; the visible code shows only the guessing path. */
2083 int guess_input_channel_layout(InputStream *ist)
2085 AVCodecContext *dec = ist->dec_ctx;
2087 if (!dec->channel_layout) {
2088 char layout_name[256];
/* refuse to guess for more channels than the user allows */
2090 if (dec->channels > ist->guess_layout_max)
2092 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2093 if (!dec->channel_layout)
2095 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2096 dec->channels, dec->channel_layout);
2097 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2098 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on decode
 * errors or corrupt decoded frames.
 * NOTE(review): the exit_program()/abort lines are elided in this chunk; only
 * the visible bookkeeping and checks are shown. */
2103 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2105 if (*got_output || ret<0)
2106 decode_error_stat[ret<0] ++;
2108 if (ret < 0 && exit_on_error)
2111 if (exit_on_error && *got_output && ist) {
2112 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2113 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2119 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the filtergraph has a known
 * format. NOTE(review): the return statements and braces are elided in this
 * chunk; only the scan over the inputs is visible. */
2120 static int ifilter_has_all_input_formats(FilterGraph *fg)
2123 for (i = 0; i < fg->nb_inputs; i++) {
2124 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/* Feed one decoded frame into a filtergraph input. Detects input parameter
 * changes (format, dimensions, audio layout, hw frames context) and triggers a
 * graph (re)configuration; frames arriving before all inputs have known
 * formats are buffered in the input's FIFO.
 * NOTE(review): several lines ('break' statements, error-path returns, closing
 * braces) are elided in this chunk; comments describe only the visible
 * statements. */
2131 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2133 FilterGraph *fg = ifilter->graph;
2134 int need_reinit, ret, i;
2136 /* determine if the parameters for this input changed */
2137 need_reinit = ifilter->format != frame->format;
/* a change in the hw frames context also forces a reinit */
2138 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2139 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2142 switch (ifilter->ist->st->codecpar->codec_type) {
2143 case AVMEDIA_TYPE_AUDIO:
2144 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2145 ifilter->channels != frame->channels ||
2146 ifilter->channel_layout != frame->channel_layout;
2148 case AVMEDIA_TYPE_VIDEO:
2149 need_reinit |= ifilter->width != frame->width ||
2150 ifilter->height != frame->height;
/* remember the new input parameters on the InputFilter */
2155 ret = ifilter_parameters_from_frame(ifilter, frame);
2160 /* (re)init the graph if possible, otherwise buffer the frame and return */
2161 if (need_reinit || !fg->graph) {
2162 for (i = 0; i < fg->nb_inputs; i++) {
/* cannot configure yet: queue a private clone of the frame */
2163 if (!ifilter_has_all_input_formats(fg)) {
2164 AVFrame *tmp = av_frame_clone(frame);
2166 return AVERROR(ENOMEM);
2167 av_frame_unref(frame);
/* grow the FIFO when full */
2169 if (!av_fifo_space(ifilter->frame_queue)) {
2170 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2172 av_frame_free(&tmp);
2176 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before reconfiguring it */
2181 ret = reap_filters(1);
2182 if (ret < 0 && ret != AVERROR_EOF) {
2184 av_strerror(ret, errbuf, sizeof(errbuf));
2186 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2190 ret = configure_filtergraph(fg);
2192 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2197 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2199 if (ret != AVERROR_EOF)
2200 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/* Signal end-of-stream on one filtergraph input. If the graph was never
 * configured and every input has now reached EOF, the graph's output streams
 * are marked finished directly.
 * NOTE(review): lines (declarations of ret/i/j, 'break', error returns,
 * closing braces) are elided in this chunk; comments describe only the
 * visible statements. */
2207 static int ifilter_send_eof(InputFilter *ifilter)
/* configured graph: a NULL frame to the buffersrc signals EOF */
2213 if (ifilter->filter) {
2214 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2218 // the filtergraph was never configured
2219 FilterGraph *fg = ifilter->graph;
2220 for (i = 0; i < fg->nb_inputs; i++)
2221 if (!fg->inputs[i]->eof)
2223 if (i == fg->nb_inputs) {
2224 // All the input streams have finished without the filtergraph
2225 // ever being configured.
2226 // Mark the output streams as finished.
2227 for (j = 0; j < fg->nb_outputs; j++)
2228 finish_output_stream(fg->outputs[j]->ost);
2235 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2236 // There is the following difference: if you got a frame, you must call
2237 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2238 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Wrapper mapping the send/receive decode API onto the old got_frame-style
 * calling convention used by decode_audio()/decode_video().
 * NOTE(review): the braces, the got_frame assignments and the return
 * statements are elided in this chunk; only the send/receive calls and their
 * error filters are visible. */
2239 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2246 ret = avcodec_send_packet(avctx, pkt);
2247 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2248 // decoded frames with avcodec_receive_frame() until done.
2249 if (ret < 0 && ret != AVERROR_EOF)
2253 ret = avcodec_receive_frame(avctx, frame);
2254 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Send one decoded frame to every filtergraph input fed by this stream. All
 * but the last destination receive a reference copy so the frame can be given
 * to each filter independently.
 * NOTE(review): lines (declarations, the else branch giving the frame itself
 * to the last filter, 'break' on error, the final return) are elided in this
 * chunk; comments describe only the visible statements. */
2262 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2267 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2268 for (i = 0; i < ist->nb_filters; i++) {
/* all but the last filter get a new reference to the frame */
2269 if (i < ist->nb_filters - 1) {
2270 f = ist->filter_frame;
2271 ret = av_frame_ref(f, decoded_frame);
2276 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from a filter input is not an error for the caller */
2277 if (ret == AVERROR_EOF)
2278 ret = 0; /* ignore */
2280 av_log(NULL, AV_LOG_ERROR,
2281 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet, fix up the frame timestamp (stream time base,
 * packet pts, or the estimated input dts as fallbacks), and forward the frame
 * to the filtergraphs.
 * NOTE(review): several lines (the rest of the signature, declarations of
 * ret/err, 'goto fail'/'return' statements, 'else', closing braces) are
 * elided in this chunk; comments describe only the visible statements. */
2288 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2291 AVFrame *decoded_frame;
2292 AVCodecContext *avctx = ist->dec_ctx;
2294 AVRational decoded_frame_tb;
/* lazily allocate the reusable per-stream frames */
2296 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2297 return AVERROR(ENOMEM);
2298 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2299 return AVERROR(ENOMEM);
2300 decoded_frame = ist->decoded_frame;
2302 update_benchmark(NULL);
2303 ret = decode(avctx, decoded_frame, got_output, pkt);
2304 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* reject decoders reporting a nonsensical sample rate */
2308 if (ret >= 0 && avctx->sample_rate <= 0) {
2309 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2310 ret = AVERROR_INVALIDDATA;
2313 if (ret != AVERROR_EOF)
2314 check_decode_result(ist, got_output, ret);
2316 if (!*got_output || ret < 0)
2319 ist->samples_decoded += decoded_frame->nb_samples;
2320 ist->frames_decoded++;
2323 /* increment next_dts to use for the case where the input stream does not
2324 have timestamps or there are multiple frames in the packet */
2325 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2327 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the best available timestamp source for the decoded frame */
2331 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2332 decoded_frame_tb = ist->st->time_base;
2333 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2334 decoded_frame->pts = pkt->pts;
2335 decoded_frame_tb = ist->st->time_base;
2337 decoded_frame->pts = ist->dts;
2338 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale to the sample-rate time base with drift compensation */
2340 if (decoded_frame->pts != AV_NOPTS_VALUE)
2341 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2342 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2343 (AVRational){1, avctx->sample_rate});
2344 ist->nb_samples = decoded_frame->nb_samples;
2345 err = send_frame_to_filters(ist, decoded_frame);
2347 av_frame_unref(ist->filter_frame);
2348 av_frame_unref(decoded_frame);
2349 return err < 0 ? err : ret;
/*
 * decode_video(): decode one video packet for 'ist' (pkt == NULL drains the
 * decoder; 'eof' marks end of input) and forward any produced frame to the
 * attached filtergraphs via send_frame_to_filters().
 * NOTE(review): this excerpt elides some statements of the original body;
 * comments below describe only the code that is visible here.
 */
2352 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2355 AVFrame *decoded_frame;
2356 int i, ret = 0, err = 0;
2357 int64_t best_effort_timestamp;
2358 int64_t dts = AV_NOPTS_VALUE;
2361 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2362 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2364 if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream scratch frames; they live on the
 * InputStream so they are reused across calls. */
2367 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2368 return AVERROR(ENOMEM);
2369 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2370 return AVERROR(ENOMEM);
2371 decoded_frame = ist->decoded_frame;
/* ist->dts is kept in AV_TIME_BASE_Q (microseconds); rescale to the stream
 * time base before attaching it to the packet sent to the decoder. */
2372 if (ist->dts != AV_NOPTS_VALUE)
2373 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2376 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2379 // The old code used to set dts on the drain packet, which does not work
2380 // with the new API anymore.
/* Queue this dts; it is consumed below as a fallback timestamp for frames
 * that come back without one while draining at EOF. */
2382 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2384 return AVERROR(ENOMEM);
2385 ist->dts_buffer = new;
2386 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2389 update_benchmark(NULL);
2390 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2391 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2395 // The following line may be required in some cases where there is no parser
2396 // or the parser does not has_b_frames correctly
/* For H.264 trust the decoder's has_b_frames; for other codecs only log a
 * warning asking for a sample upload. */
2397 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2398 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2399 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2401 av_log(ist->dec_ctx, AV_LOG_WARNING,
2402 "video_delay is larger in decoder than demuxer %d > %d.\n"
2403 "If you want to help, upload a sample "
2404 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2405 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2406 ist->dec_ctx->has_b_frames,
2407 ist->st->codecpar->video_delay);
2410 if (ret != AVERROR_EOF)
2411 check_decode_result(ist, got_output, ret);
/* Sanity check: decoder context and produced frame should agree on geometry
 * and pixel format; a mismatch is only logged at debug level. */
2413 if (*got_output && ret >= 0) {
2414 if (ist->dec_ctx->width != decoded_frame->width ||
2415 ist->dec_ctx->height != decoded_frame->height ||
2416 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2417 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2418 decoded_frame->width,
2419 decoded_frame->height,
2420 decoded_frame->format,
2421 ist->dec_ctx->width,
2422 ist->dec_ctx->height,
2423 ist->dec_ctx->pix_fmt);
2427 if (!*got_output || ret < 0)
/* User override of field order (-top option). */
2430 if(ist->top_field_first>=0)
2431 decoded_frame->top_field_first = ist->top_field_first;
2433 ist->frames_decoded++;
/* Download the frame from hwaccel-specific memory when it is still in the
 * hwaccel pixel format. */
2435 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2436 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2440 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2442 best_effort_timestamp= decoded_frame->best_effort_timestamp;
/* Forced CFR input (-r before -i): ignore decoder timestamps and use a
 * running frame counter instead. */
2444 if (ist->framerate.num)
2445 best_effort_timestamp = ist->cfr_next_pts++;
/* Draining: frames may surface without a timestamp; fall back to the oldest
 * queued packet dts and shift the queue down by one. */
2447 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2448 best_effort_timestamp = ist->dts_buffer[0];
2450 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2451 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2452 ist->nb_dts_buffer--;
/* Propagate the chosen timestamp: store it as frame pts (stream time base)
 * and mirror it into ist->pts/next_pts in AV_TIME_BASE_Q units. */
2455 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2456 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2458 if (ts != AV_NOPTS_VALUE)
2459 ist->next_pts = ist->pts = ts;
2463 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2464 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2465 ist->st->index, av_ts2str(decoded_frame->pts),
2466 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2467 best_effort_timestamp,
2468 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2469 decoded_frame->key_frame, decoded_frame->pict_type,
2470 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level aspect ratio overrides the codec-level one. */
2473 if (ist->st->sample_aspect_ratio.num)
2474 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2476 err = send_frame_to_filters(ist, decoded_frame);
2479 av_frame_unref(ist->filter_frame);
2480 av_frame_unref(decoded_frame);
/* A filtergraph error takes precedence over the decode return value. */
2481 return err < 0 ? err : ret;
/*
 * transcode_subtitles(): decode one subtitle packet, optionally fix up the
 * previous subtitle's duration, feed the sub2video machinery and finally the
 * subtitle encoders of all matching output streams.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2484 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2487 AVSubtitle subtitle;
2489 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2490 &subtitle, got_output, pkt);
2492 check_decode_result(NULL, got_output, ret);
/* On error or no output: flush sub2video (visible path) and bail out. */
2494 if (ret < 0 || !*got_output) {
2497 sub2video_flush(ist);
/* -fix_sub_duration: output is delayed by one subtitle so the previous one's
 * end_display_time (milliseconds) can be clamped to the gap between its pts
 * and the current subtitle's pts. */
2501 if (ist->fix_sub_duration) {
2503 if (ist->prev_sub.got_output) {
2504 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2505 1000, AV_TIME_BASE);
2506 if (end < ist->prev_sub.subtitle.end_display_time) {
2507 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2508 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2509 ist->prev_sub.subtitle.end_display_time, end,
2510 end <= 0 ? ", dropping it" : "");
2511 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current decode result with the stashed previous one: we emit the
 * previous subtitle now and keep the current one for the next call. */
2514 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2515 FFSWAP(int, ret, ist->prev_sub.ret);
2516 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* sub2video: render onto the existing frame if initialized; otherwise queue
 * the subtitle in a FIFO (doubling its size when full) until the filters
 * are ready to consume it. */
2524 if (ist->sub2video.frame) {
2525 sub2video_update(ist, &subtitle);
2526 } else if (ist->nb_filters) {
2527 if (!ist->sub2video.sub_queue)
2528 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2529 if (!ist->sub2video.sub_queue)
2531 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2532 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2536 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2540 if (!subtitle.num_rects)
2543 ist->frames_decoded++;
/* Hand the subtitle to every output stream that encodes subtitles from this
 * input (constraints checked per stream). */
2545 for (i = 0; i < nb_output_streams; i++) {
2546 OutputStream *ost = output_streams[i];
2548 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2549 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2552 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2557 avsubtitle_free(&subtitle);
2561 static int send_filter_eof(InputStream *ist)
2564 for (i = 0; i < ist->nb_filters; i++) {
2565 ret = ifilter_send_eof(ist->filters[i]);
2572 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * process_input_packet(): central per-packet dispatcher. Maintains the input
 * stream's dts/pts clocks (in AV_TIME_BASE_Q), runs the appropriate decoder
 * for streams that need decoding, and performs streamcopy bookkeeping and
 * output for the rest. Returns nonzero while more input is expected, 0 once
 * EOF has been fully processed.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2573 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2577 int eof_reached = 0;
/* First packet: seed the stream clock. For streams with a known average
 * frame rate, pre-roll the dts by the decoder delay (has_b_frames). */
2580 if (!ist->saw_first_ts) {
2581 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2583 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2584 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2585 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2587 ist->saw_first_ts = 1;
2590 if (ist->next_dts == AV_NOPTS_VALUE)
2591 ist->next_dts = ist->dts;
2592 if (ist->next_pts == AV_NOPTS_VALUE)
2593 ist->next_pts = ist->pts;
2597 av_init_packet(&avpkt);
/* Sync the stream clock to the packet dts (converted to microseconds). For
 * video being decoded, pts is updated later from decoder output instead. */
2604 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2605 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2606 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2607 ist->next_pts = ist->pts = ist->dts;
2610 // while we have more to decode or while the decoder did output something on EOF
2611 while (ist->decoding_needed) {
2612 int64_t duration = 0;
2614 int decode_failed = 0;
2616 ist->pts = ist->next_pts;
2617 ist->dts = ist->next_dts;
/* Dispatch per media type; 'repeating' iterations pass NULL so the decoder
 * is drained rather than fed the same packet twice. */
2619 switch (ist->dec_ctx->codec_type) {
2620 case AVMEDIA_TYPE_AUDIO:
2621 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2624 case AVMEDIA_TYPE_VIDEO:
2625 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
/* Estimate the frame duration: prefer the packet duration, else derive it
 * from the codec frame rate, repeat_pict and ticks_per_frame. */
2627 if (!repeating || !pkt || got_output) {
2628 if (pkt && pkt->duration) {
2629 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2631 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2632 duration = ((int64_t)AV_TIME_BASE *
2633 ist->dec_ctx->framerate.den * ticks) /
2634 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
/* Advance the predicted dts by the duration; without a duration the
 * prediction becomes unknown. */
2637 if(ist->dts != AV_NOPTS_VALUE && duration) {
2638 ist->next_dts += duration;
2640 ist->next_dts = AV_NOPTS_VALUE;
2644 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2646 case AVMEDIA_TYPE_SUBTITLE:
2649 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2650 if (!pkt && ret >= 0)
2657 if (ret == AVERROR_EOF) {
/* Distinguish decoder failures from downstream (filter/output) failures in
 * the log; only some failures are fatal depending on exit_on_error. */
2663 if (decode_failed) {
2664 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2665 ist->file_index, ist->st->index, av_err2str(ret));
2667 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2668 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2670 if (!decode_failed || exit_on_error)
2676 ist->got_output = 1;
2681 // During draining, we might get multiple output frames in this loop.
2682 // ffmpeg.c does not drain the filter chain on configuration changes,
2683 // which means if we send multiple frames at once to the filters, and
2684 // one of those frames changes configuration, the buffered frames will
2685 // be lost. This can upset certain FATE tests.
2686 // Decode only 1 frame per call on EOF to appease these FATE tests.
2687 // The ideal solution would be to rewrite decoding to use the new
2688 // decoding API in a better way.
2695 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2696 /* except when looping we need to flush but not to send an EOF */
2697 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2698 int ret = send_filter_eof(ist);
2700 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2705 /* handle stream copy */
/* Streamcopy path: advance the clocks analytically from frame_size/sample
 * rate (audio) or frame rate / packet duration (video). */
2706 if (!ist->decoding_needed) {
2707 ist->dts = ist->next_dts;
2708 switch (ist->dec_ctx->codec_type) {
2709 case AVMEDIA_TYPE_AUDIO:
2710 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2711 ist->dec_ctx->sample_rate;
2713 case AVMEDIA_TYPE_VIDEO:
2714 if (ist->framerate.num) {
2715 // TODO: Remove work-around for c99-to-c89 issue 7
/* Forced CFR: snap next_dts to the frame grid and step one frame. */
2716 AVRational time_base_q = AV_TIME_BASE_Q;
2717 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2718 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2719 } else if (pkt->duration) {
2720 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721 } else if(ist->dec_ctx->framerate.num != 0) {
2722 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2723 ist->next_dts += ((int64_t)AV_TIME_BASE *
2724 ist->dec_ctx->framerate.den * ticks) /
2725 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2729 ist->pts = ist->dts;
2730 ist->next_pts = ist->next_dts;
/* Copy the packet to every streamcopy output fed by this input. */
2732 for (i = 0; pkt && i < nb_output_streams; i++) {
2733 OutputStream *ost = output_streams[i];
2735 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2738 do_streamcopy(ist, ost, pkt);
2741 return !eof_reached;
/*
 * print_sdp(): build an SDP description covering all RTP output files and
 * either print it to stdout or write it to -sdp_file. Waits until every
 * output file's header has been written before doing anything.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2744 static void print_sdp(void)
2749 AVIOContext *sdp_pb;
2750 AVFormatContext **avc;
/* Bail out until all muxers have written their headers. */
2752 for (i = 0; i < nb_output_files; i++) {
2753 if (!output_files[i]->header_written)
2757 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts ('j' counts matches). */
2760 for (i = 0, j = 0; i < nb_output_files; i++) {
2761 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2762 avc[j] = output_files[i]->ctx;
2770 av_sdp_create(avc, j, sdp, sizeof(sdp));
2772 if (!sdp_filename) {
2773 printf("SDP:\n%s\n", sdp);
/* With -sdp_file: write via avio using the global interrupt callback. */
2776 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2777 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2779 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2780 avio_closep(&sdp_pb);
2781 av_freep(&sdp_filename);
2789 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2792 for (i = 0; hwaccels[i].name; i++)
2793 if (hwaccels[i].pix_fmt == pix_fmt)
2794 return &hwaccels[i];
/*
 * get_format(): AVCodecContext.get_format callback. Walks the decoder's
 * proposed pixel formats and picks the first hwaccel format that matches the
 * user's -hwaccel selection and initializes successfully; falls through to
 * software formats otherwise. Also propagates the user-supplied
 * hw_frames_ctx into the decoder context.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2798 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2800 InputStream *ist = s->opaque;
2801 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2804 for (p = pix_fmts; *p != -1; p++) {
2805 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2806 const HWAccel *hwaccel;
/* Software formats are not handled here. */
2808 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Skip hwaccels that don't match the already-active one or the one the
 * user explicitly requested. */
2811 hwaccel = get_hwaccel(*p);
2813 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2814 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2817 ret = hwaccel->init(s);
/* Init failure is fatal only when this exact hwaccel was requested. */
2819 if (ist->hwaccel_id == hwaccel->id) {
2820 av_log(NULL, AV_LOG_FATAL,
2821 "%s hwaccel requested for input stream #%d:%d, "
2822 "but cannot be initialized.\n", hwaccel->name,
2823 ist->file_index, ist->st->index);
2824 return AV_PIX_FMT_NONE;
/* Pass a user-supplied hardware frames context on to the decoder. */
2829 if (ist->hw_frames_ctx) {
2830 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2831 if (!s->hw_frames_ctx)
2832 return AV_PIX_FMT_NONE;
/* Record the accepted hwaccel and its pixel format on the input stream. */
2835 ist->active_hwaccel_id = hwaccel->id;
2836 ist->hwaccel_pix_fmt = *p;
2843 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2845 InputStream *ist = s->opaque;
2847 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2848 return ist->hwaccel_get_buffer(s, frame, flags);
2850 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * init_input_stream(): open the decoder for one input stream (if decoding is
 * needed), wiring up the hwaccel callbacks, decoder options and the packet
 * time base. On failure a human-readable message is written into 'error'.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2853 static int init_input_stream(int ist_index, char *error, int error_len)
2856 InputStream *ist = input_streams[ist_index];
2858 if (ist->decoding_needed) {
2859 AVCodec *codec = ist->dec;
2861 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2862 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2863 return AVERROR(EINVAL);
/* Hook up ffmpeg.c's callbacks: hwaccel format negotiation and buffer
 * allocation; opaque points back at the InputStream. */
2866 ist->dec_ctx->opaque = ist;
2867 ist->dec_ctx->get_format = get_format;
2868 ist->dec_ctx->get_buffer2 = get_buffer;
2869 ist->dec_ctx->thread_safe_callbacks = 1;
2871 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need compute_edt when they are muxed out directly; mixing
 * that with filtering is only partially supported. */
2872 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2873 (ist->decoding_needed & DECODING_FOR_OST)) {
2874 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2875 if (ist->decoding_needed & DECODING_FOR_FILTER)
2876 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2879 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2881 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2882 * audio, and video decoders such as cuvid or mediacodec */
2883 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2885 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2886 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2888 ret = hw_device_setup_for_decode(ist);
2890 snprintf(error, error_len, "Device setup failed for "
2891 "decoder on input stream #%d:%d : %s",
2892 ist->file_index, ist->st->index, av_err2str(ret));
2896 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2897 if (ret == AVERROR_EXPERIMENTAL)
2898 abort_codec_experimental(codec, 0);
2900 snprintf(error, error_len,
2901 "Error while opening decoder for input stream "
2903 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed -> abort. */
2906 assert_avoptions(ist->decoder_opts);
/* Reset the prediction clocks; they are seeded on the first packet. */
2909 ist->next_pts = AV_NOPTS_VALUE;
2910 ist->next_dts = AV_NOPTS_VALUE;
2915 static InputStream *get_input_stream(OutputStream *ost)
2917 if (ost->source_index >= 0)
2918 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values, ascending. Uses FFDIFFSIGN rather
 * than subtraction so the result cannot overflow int.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return FFDIFFSIGN(lhs, rhs);
}
2927 /* open the muxer when all the streams are initialized */
/*
 * check_init_output_file(): once every stream of the output file has been
 * initialized, write the muxer header, optionally print the SDP, and flush
 * the packets that were queued while streams were still initializing.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2928 static int check_init_output_file(OutputFile *of, int file_index)
/* Do nothing until all streams of this file are initialized. */
2932 for (i = 0; i < of->ctx->nb_streams; i++) {
2933 OutputStream *ost = output_streams[of->ost_index + i];
2934 if (!ost->initialized)
2938 of->ctx->interrupt_callback = int_cb;
2940 ret = avformat_write_header(of->ctx, &of->opts);
2942 av_log(NULL, AV_LOG_ERROR,
2943 "Could not write header for output file #%d "
2944 "(incorrect codec parameters ?): %s\n",
2945 file_index, av_err2str(ret));
2948 //assert_avoptions(of->opts);
2949 of->header_written = 1;
2951 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2953 if (sdp_filename || want_sdp)
2956 /* flush the muxing queues */
2957 for (i = 0; i < of->ctx->nb_streams; i++) {
2958 OutputStream *ost = output_streams[of->ost_index + i];
2960 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2961 if (!av_fifo_size(ost->muxing_queue))
2962 ost->mux_timebase = ost->st->time_base;
/* Drain all packets buffered before the header was written. */
2964 while (av_fifo_size(ost->muxing_queue)) {
2966 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2967 write_packet(of, &pkt, ost, 1);
/*
 * init_output_bsfs(): initialize the output stream's bitstream filter chain.
 * Each filter's input parameters/time base come from the previous filter's
 * output (or the stream itself for the first one); the stream's codecpar and
 * time base are finally taken from the last filter's output.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
2974 static int init_output_bsfs(OutputStream *ost)
2979 if (!ost->nb_bitstream_filters)
2982 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2983 ctx = ost->bsf_ctx[i];
/* Chain: filter i consumes what filter i-1 produces. */
2985 ret = avcodec_parameters_copy(ctx->par_in,
2986 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2990 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2992 ret = av_bsf_init(ctx);
2994 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2995 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
3000 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3001 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3005 ost->st->time_base = ctx->time_base_out;
/*
 * init_output_stream_streamcopy(): set up an output stream that copies its
 * input without re-encoding: propagate codec parameters, codec tag, frame
 * rate, time base, duration, disposition and side data from the input
 * stream, plus per-media-type fixups.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
3010 static int init_output_stream_streamcopy(OutputStream *ost)
3012 OutputFile *of = output_files[ost->file_index];
3013 InputStream *ist = get_input_stream(ost);
3014 AVCodecParameters *par_dst = ost->st->codecpar;
3015 AVCodecParameters *par_src = ost->ref_par;
3018 uint32_t codec_tag = par_dst->codec_tag;
3020 av_assert0(ist && !ost->filter);
/* Route the input parameters through enc_ctx so user -c:copy options can be
 * applied, then snapshot them back into par_src. */
3022 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3024 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3026 av_log(NULL, AV_LOG_FATAL,
3027 "Error setting up codec context options.\n");
3030 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Keep the source codec tag only when the target container can represent
 * this codec with it (or has no tag table at all). */
3033 unsigned int codec_tag_tmp;
3034 if (!of->ctx->oformat->codec_tag ||
3035 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3036 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3037 codec_tag = par_src->codec_tag;
3040 ret = avcodec_parameters_copy(par_dst, par_src);
3044 par_dst->codec_tag = codec_tag;
3046 if (!ost->frame_rate.num)
3047 ost->frame_rate = ist->framerate;
3048 ost->st->avg_frame_rate = ost->frame_rate;
3050 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3054 // copy timebase while removing common factors
3055 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3056 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3058 // copy estimated duration as a hint to the muxer
3059 if (ost->st->duration <= 0 && ist->st->duration > 0)
3060 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3063 ost->st->disposition = ist->st->disposition;
/* Deep-copy all stream-level side data packets. */
3065 if (ist->st->nb_side_data) {
3066 for (i = 0; i < ist->st->nb_side_data; i++) {
3067 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3070 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3072 return AVERROR(ENOMEM);
3073 memcpy(dst_data, sd_src->data, sd_src->size);
/* -metadata rotate override: emit a fresh display matrix (3x3 int32). */
3077 if (ost->rotate_overridden) {
3078 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3079 sizeof(int32_t) * 9);
3081 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3084 ost->parser = av_parser_init(par_dst->codec_id);
3085 ost->parser_avctx = avcodec_alloc_context3(NULL);
3086 if (!ost->parser_avctx)
3087 return AVERROR(ENOMEM);
3089 switch (par_dst->codec_type) {
3090 case AVMEDIA_TYPE_AUDIO:
3091 if (audio_volume != 256) {
3092 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* These block_align values are bogus/meaningless for MP3 and AC-3; clear
 * them so the muxer does not propagate them. */
3095 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3096 par_dst->block_align= 0;
3097 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3098 par_dst->block_align= 0;
3100 case AVMEDIA_TYPE_VIDEO:
/* SAR priority: -aspect override > container-level SAR > codec-level SAR. */
3101 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3103 av_mul_q(ost->frame_aspect_ratio,
3104 (AVRational){ par_dst->height, par_dst->width });
3105 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3106 "with stream copy may produce invalid files\n");
3108 else if (ist->st->sample_aspect_ratio.num)
3109 sar = ist->st->sample_aspect_ratio;
3111 sar = par_src->sample_aspect_ratio;
3112 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3113 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3114 ost->st->r_frame_rate = ist->st->r_frame_rate;
3118 ost->mux_timebase = ist->st->time_base;
/*
 * set_encoder_id(): store an "encoder" metadata tag ("Lavc<version> <name>")
 * on the output stream unless the user already set one. In bitexact mode the
 * library version is omitted so output files stay reproducible.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
3123 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3125 AVDictionaryEntry *e;
3127 uint8_t *encoder_string;
3128 int encoder_string_len;
3129 int format_flags = 0;
3130 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
3132 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags from the option dicts to detect bitexact; the
 * contexts themselves are not fully configured yet. */
3135 e = av_dict_get(of->opts, "fflags", NULL, 0);
3137 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3140 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3142 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3144 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3147 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: separating space and trailing NUL. */
3150 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3151 encoder_string = av_mallocz(encoder_string_len);
3152 if (!encoder_string)
3155 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3156 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3158 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3159 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dict takes ownership of encoder_string (DONT_STRDUP_VAL). */
3160 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3161 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * parse_forced_key_frames(): parse the -force_key_frames argument (a comma
 * separated list of times, where an entry may be "chapters[+offset]" to
 * expand to all chapter start times) into a sorted int64_t array stored in
 * ost->forced_kf_pts (encoder time base).
 * NOTE(review): this excerpt elides some statements of the original body.
 */
3164 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3165 AVCodecContext *avctx)
3168 int n = 1, i, size, index = 0;
/* Count entries: commas + 1 (counting loop partially elided here). */
3171 for (p = kf; *p; p++)
3175 pts = av_malloc_array(size, sizeof(*pts));
3177 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3182 for (i = 0; i < n; i++) {
3183 char *next = strchr(p, ',');
/* "chapters[+offset]": grow the array and add one keyframe per chapter. */
3188 if (!memcmp(p, "chapters", 8)) {
3190 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Guard size += nb_chapters - 1 against int overflow before realloc. */
3193 if (avf->nb_chapters > INT_MAX - size ||
3194 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3196 av_log(NULL, AV_LOG_FATAL,
3197 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", converted to encoder time base. */
3200 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3201 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3203 for (j = 0; j < avf->nb_chapters; j++) {
3204 AVChapter *c = avf->chapters[j];
3205 av_assert1(index < size);
3206 pts[index++] = av_rescale_q(c->start, c->time_base,
3207 avctx->time_base) + t;
/* Plain entry: a single timestamp. */
3212 t = parse_time_or_die("force_key_frames", p, 1);
3213 av_assert1(index < size);
3214 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* The encoder expects the forced keyframe times sorted ascending. */
3221 av_assert0(index == size);
3222 qsort(pts, size, sizeof(*pts), compare_int64);
3223 ost->forced_kf_count = size;
3224 ost->forced_kf_pts = pts;
/*
 * init_encoder_time_base(): choose the encoder time base. Priority:
 *   1) an explicit positive -enc_time_base value;
 *   2) a negative -enc_time_base meaning "use the input stream time base"
 *      (falls back with a warning when there is no input stream);
 *   3) the media-type-specific default passed by the caller.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
3227 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3229 InputStream *ist = get_input_stream(ost);
3230 AVCodecContext *enc_ctx = ost->enc_ctx;
3231 AVFormatContext *oc;
3233 if (ost->enc_timebase.num > 0) {
3234 enc_ctx->time_base = ost->enc_timebase;
3238 if (ost->enc_timebase.num < 0) {
3240 enc_ctx->time_base = ist->st->time_base;
3244 oc = output_files[ost->file_index]->ctx;
3245 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3248 enc_ctx->time_base = default_time_base;
/*
 * init_output_stream_encode(): configure the encoder context of an output
 * stream from the filtergraph sink and (when available) the input stream:
 * disposition, frame rate, time base, sample/pixel format, geometry, SAR,
 * and the -force_key_frames settings.
 * NOTE(review): this excerpt elides some statements of the original body.
 */
3251 static int init_output_stream_encode(OutputStream *ost)
3253 InputStream *ist = get_input_stream(ost);
3254 AVCodecContext *enc_ctx = ost->enc_ctx;
3255 AVCodecContext *dec_ctx = NULL;
3256 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3259 set_encoder_id(output_files[ost->file_index], ost);
3261 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3262 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3263 // which have to be filtered out to prevent leaking them to output files.
3264 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3267 ost->st->disposition = ist->st->disposition;
3269 dec_ctx = ist->dec_ctx;
3271 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the file, mark it
 * as the default disposition. */
3273 for (j = 0; j < oc->nb_streams; j++) {
3274 AVStream *st = oc->streams[j];
3275 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3278 if (j == oc->nb_streams)
3279 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3280 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3281 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Frame rate priority: -r, filter sink, -r on input, input r_frame_rate,
 * final 25 fps fallback with a warning. */
3284 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3285 if (!ost->frame_rate.num)
3286 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3287 if (ist && !ost->frame_rate.num)
3288 ost->frame_rate = ist->framerate;
3289 if (ist && !ost->frame_rate.num)
3290 ost->frame_rate = ist->st->r_frame_rate;
3291 if (ist && !ost->frame_rate.num) {
3292 ost->frame_rate = (AVRational){25, 1};
3293 av_log(NULL, AV_LOG_WARNING,
3295 "about the input framerate is available. Falling "
3296 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3297 "if you want a different framerate.\n",
3298 ost->file_index, ost->index);
3300 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest frame rate the encoder supports, unless -force_fps. */
3301 if (ost->enc->supported_framerates && !ost->force_fps) {
3302 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3303 ost->frame_rate = ost->enc->supported_framerates[idx];
3305 // reduce frame rate for mpeg4 to be within the spec limits
3306 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3307 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3308 ost->frame_rate.num, ost->frame_rate.den, 65535);
3312 switch (enc_ctx->codec_type) {
3313 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters all come from the buffersink of the filtergraph. */
3314 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3316 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3317 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3318 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3319 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3320 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3322 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3325 case AVMEDIA_TYPE_VIDEO:
3326 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3328 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3329 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
/* Warn about sub-millisecond time bases on muxers that sync to CFR. */
3330 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3331 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3332 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3333 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed earlier; rescale into the final
 * encoder time base. */
3335 for (j = 0; j < ost->forced_kf_count; j++)
3336 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3338 enc_ctx->time_base);
3340 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3341 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3342 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3343 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3344 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3345 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
/* Player-compatibility warnings when libx264/mpeg2video pick a non-yuv420p
 * format because none was specified. */
3346 if (!strncmp(ost->enc->name, "libx264", 7) &&
3347 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3348 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3349 av_log(NULL, AV_LOG_WARNING,
3350 "No pixel format specified, %s for H.264 encoding chosen.\n"
3351 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3352 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3353 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3354 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3355 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3356 av_log(NULL, AV_LOG_WARNING,
3357 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3358 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3359 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3360 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3362 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3363 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3365 enc_ctx->framerate = ost->frame_rate;
3367 ost->st->avg_frame_rate = ost->frame_rate;
/* bits_per_raw_sample from the decoder only survives when geometry and
 * pixel format are unchanged; otherwise use the user/global setting. */
3370 enc_ctx->width != dec_ctx->width ||
3371 enc_ctx->height != dec_ctx->height ||
3372 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3373 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either a runtime-evaluated expression ("expr:..."),
 * the special "source" keyword, or a static list of times. */
3376 if (ost->forced_keyframes) {
3377 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3378 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3379 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3381 av_log(NULL, AV_LOG_ERROR,
3382 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3385 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3386 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3387 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3388 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3390 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3391 // parse it only for static kf timings
3392 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3393 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3397 case AVMEDIA_TYPE_SUBTITLE:
3398 enc_ctx->time_base = AV_TIME_BASE_Q;
/* Text subtitles have no geometry; borrow it from the source stream. */
3399 if (!enc_ctx->width) {
3400 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3401 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3404 case AVMEDIA_TYPE_DATA:
3411 ost->mux_timebase = enc_ctx->time_base;
3416 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3420 if (ost->encoding_needed) {
3421 AVCodec *codec = ost->enc;
3422 AVCodecContext *dec = NULL;
3425 ret = init_output_stream_encode(ost);
3429 if ((ist = get_input_stream(ost)))
3431 if (dec && dec->subtitle_header) {
3432 /* ASS code assumes this buffer is null terminated so add extra byte. */
3433 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3434 if (!ost->enc_ctx->subtitle_header)
3435 return AVERROR(ENOMEM);
3436 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3437 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3439 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3440 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3441 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3443 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3444 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3445 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3447 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3448 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3449 av_buffersink_get_format(ost->filter->filter)) {
3450 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3451 if (!ost->enc_ctx->hw_frames_ctx)
3452 return AVERROR(ENOMEM);
3454 ret = hw_device_setup_for_encode(ost);
3456 snprintf(error, error_len, "Device setup failed for "
3457 "encoder on output stream #%d:%d : %s",
3458 ost->file_index, ost->index, av_err2str(ret));
3463 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3464 if (ret == AVERROR_EXPERIMENTAL)
3465 abort_codec_experimental(codec, 1);
3466 snprintf(error, error_len,
3467 "Error while opening encoder for output stream #%d:%d - "
3468 "maybe incorrect parameters such as bit_rate, rate, width or height",
3469 ost->file_index, ost->index);
3472 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3473 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3474 av_buffersink_set_frame_size(ost->filter->filter,
3475 ost->enc_ctx->frame_size);
3476 assert_avoptions(ost->encoder_opts);
3477 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3478 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3479 " It takes bits/s as argument, not kbits/s\n");
3481 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3483 av_log(NULL, AV_LOG_FATAL,
3484 "Error initializing the output stream codec context.\n");
3488 * FIXME: ost->st->codec should't be needed here anymore.
3490 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3494 if (ost->enc_ctx->nb_coded_side_data) {
3497 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3498 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3501 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3503 return AVERROR(ENOMEM);
3504 memcpy(dst_data, sd_src->data, sd_src->size);
3509 * Add global input side data. For now this is naive, and copies it
3510 * from the input stream's global side data. All side data should
3511 * really be funneled over AVFrame and libavfilter, then added back to
3512 * packet side data, and then potentially using the first packet for
3517 for (i = 0; i < ist->st->nb_side_data; i++) {
3518 AVPacketSideData *sd = &ist->st->side_data[i];
3519 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3521 return AVERROR(ENOMEM);
3522 memcpy(dst, sd->data, sd->size);
3523 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3524 av_display_rotation_set((uint32_t *)dst, 0);
3528 // copy timebase while removing common factors
3529 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3530 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3532 // copy estimated duration as a hint to the muxer
3533 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3534 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3536 ost->st->codec->codec= ost->enc_ctx->codec;
3537 } else if (ost->stream_copy) {
3538 ret = init_output_stream_streamcopy(ost);
3543 * FIXME: will the codec context used by the parser during streamcopy
3544 * This should go away with the new parser API.
3546 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3551 // parse user provided disposition, and update stream values
3552 if (ost->disposition) {
3553 static const AVOption opts[] = {
3554 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3555 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3556 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3557 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3558 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3559 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3560 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3561 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3562 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3563 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3564 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3565 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3566 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3567 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3570 static const AVClass class = {
3572 .item_name = av_default_item_name,
3574 .version = LIBAVUTIL_VERSION_INT,
3576 const AVClass *pclass = &class;
3578 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3583 /* initialize bitstream filters for the output stream
3584 * needs to be done here, because the codec id for streamcopy is not
3585 * known until now */
3586 ret = init_output_bsfs(ost);
3590 ost->initialized = 1;
3592 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn once per stream index when a packet arrives for a stream that was
 * not present when the input file was opened (streams appearing mid-file).
 * The per-file high-water mark nb_streams_warn suppresses repeat warnings.
 * NOTE(review): several original source lines are elided in this excerpt
 * (e.g. the early-return body after the threshold check). */
3599 static void report_new_stream(int input_index, AVPacket *pkt)
3601 InputFile *file = input_files[input_index];
3602 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index — nothing to do. */
3604 if (pkt->stream_index < file->nb_streams_warn)
3606 av_log(file->ctx, AV_LOG_WARNING,
3607 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3608 av_get_media_type_string(st->codecpar->codec_type),
3609 input_index, pkt->stream_index,
3610 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Raise the threshold so this stream is only reported once. */
3611 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop: bind complex-filtergraph
 * outputs to a source input stream, initialize input streams and encoders,
 * discard unused programs, write headers for stream-less outputs, and dump
 * the human-readable stream mapping. Returns 0 on success, <0 on error.
 * NOTE(review): this excerpt elides many original lines (error branches,
 * closing braces, the final return) — read against the full file. */
3614 static int transcode_init(void)
3616 int ret = 0, i, j, k;
3617 AVFormatContext *oc;
3620 char error[1024] = {0};
/* For filtergraph outputs with no explicit source, pick the (single)
 * graph input's stream as the nominal source_index. */
3622 for (i = 0; i < nb_filtergraphs; i++) {
3623 FilterGraph *fg = filtergraphs[i];
3624 for (j = 0; j < fg->nb_outputs; j++) {
3625 OutputFilter *ofilter = fg->outputs[j];
3626 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3628 if (fg->nb_inputs != 1)
3630 for (k = nb_input_streams-1; k >= 0 ; k--)
3631 if (fg->inputs[0]->ist == input_streams[k])
3633 ofilter->ost->source_index = k;
3637 /* init framerate emulation */
3638 for (i = 0; i < nb_input_files; i++) {
3639 InputFile *ifile = input_files[i];
3640 if (ifile->rate_emu)
3641 for (j = 0; j < ifile->nb_streams; j++)
3642 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3645 /* init input streams */
3646 for (i = 0; i < nb_input_streams; i++)
3647 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any already-opened encoder contexts before bailing. */
3648 for (i = 0; i < nb_output_streams; i++) {
3649 ost = output_streams[i];
3650 avcodec_close(ost->enc_ctx);
3655 /* open each encoder */
3656 for (i = 0; i < nb_output_streams; i++) {
3657 // skip streams fed from filtergraphs until we have a frame for them
3658 if (output_streams[i]->filter)
3661 ret = init_output_stream(output_streams[i], error, sizeof(error));
3666 /* discard unused programs */
3667 for (i = 0; i < nb_input_files; i++) {
3668 InputFile *ifile = input_files[i];
3669 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3670 AVProgram *p = ifile->ctx->programs[j];
3671 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is actually used. */
3673 for (k = 0; k < p->nb_stream_indexes; k++)
3674 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3675 discard = AVDISCARD_DEFAULT;
3678 p->discard = discard;
3682 /* write headers for files with no streams */
3683 for (i = 0; i < nb_output_files; i++) {
3684 oc = output_files[i]->ctx;
3685 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3686 ret = check_init_output_file(output_files[i], i);
3693 /* dump the stream mapping */
3694 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
/* Input side of the mapping: streams feeding complex filtergraphs. */
3695 for (i = 0; i < nb_input_streams; i++) {
3696 ist = input_streams[i];
3698 for (j = 0; j < ist->nb_filters; j++) {
3699 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3700 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3701 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3702 ist->filters[j]->name);
3703 if (nb_filtergraphs > 1)
3704 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3705 av_log(NULL, AV_LOG_INFO, "\n");
/* Output side of the mapping: one entry per output stream. */
3710 for (i = 0; i < nb_output_streams; i++) {
3711 ost = output_streams[i];
3713 if (ost->attachment_filename) {
3714 /* an attached file */
3715 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3716 ost->attachment_filename, ost->file_index, ost->index);
3720 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3721 /* output from a complex graph */
3722 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3723 if (nb_filtergraphs > 1)
3724 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3726 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3727 ost->index, ost->enc ? ost->enc->name : "?");
3731 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3732 input_streams[ost->source_index]->file_index,
3733 input_streams[ost->source_index]->st->index,
3736 if (ost->sync_ist != input_streams[ost->source_index])
3737 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3738 ost->sync_ist->file_index,
3739 ost->sync_ist->st->index);
3740 if (ost->stream_copy)
3741 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcoded streams, show codec and decoder/encoder names;
 * "native" is printed when the implementation name matches the
 * codec descriptor name. */
3743 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3744 const AVCodec *out_codec = ost->enc;
3745 const char *decoder_name = "?";
3746 const char *in_codec_name = "?";
3747 const char *encoder_name = "?";
3748 const char *out_codec_name = "?";
3749 const AVCodecDescriptor *desc;
3752 decoder_name = in_codec->name;
3753 desc = avcodec_descriptor_get(in_codec->id);
3755 in_codec_name = desc->name;
3756 if (!strcmp(decoder_name, in_codec_name))
3757 decoder_name = "native";
3761 encoder_name = out_codec->name;
3762 desc = avcodec_descriptor_get(out_codec->id);
3764 out_codec_name = desc->name;
3765 if (!strcmp(encoder_name, out_codec_name))
3766 encoder_name = "native";
3769 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3770 in_codec_name, decoder_name,
3771 out_codec_name, encoder_name);
3773 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path: report the accumulated error message. */
3777 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Signal other threads/signal handlers that init is complete. */
3781 atomic_store(&transcode_init_done, 1);
3786 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Scan all output streams; a stream still "needs output" unless it is
 * finished, its file hit the size limit, or it reached max_frames (in
 * which case all streams of that file are closed).
 * NOTE(review): return statements are elided in this excerpt. */
3787 static int need_output(void)
3791 for (i = 0; i < nb_output_streams; i++) {
3792 OutputStream *ost = output_streams[i];
3793 OutputFile *of = output_files[ost->file_index];
3794 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are done or whose file reached -fs limit_filesize. */
3796 if (ost->finished ||
3797 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3799 if (ost->frame_number >= ost->max_frames) {
3801 for (j = 0; j < of->ctx->nb_streams; j++)
3802 close_output_stream(output_streams[of->ost_index + j]);
3813 * Select the output stream to process.
3815 * @return selected output stream, or NULL if none available
/* Pick the next output stream to process: the unfinished stream with the
 * smallest current DTS (rescaled to a common time base). Streams with no
 * DTS yet sort first (INT64_MIN). Returns NULL if none is available. */
3817 static OutputStream *choose_output(void)
3820 int64_t opts_min = INT64_MAX;
3821 OutputStream *ost_min = NULL;
3823 for (i = 0; i < nb_output_streams; i++) {
3824 OutputStream *ost = output_streams[i];
/* No DTS yet -> treat as minimal so the stream is serviced first. */
3825 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3826 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3828 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3829 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Skip streams that are neither initialized nor marked inputs_done. */
3831 if (!ost->initialized && !ost->inputs_done)
3834 if (!ost->finished && opts < opts_min) {
/* Unavailable streams yield NULL (caller retries later). */
3836 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if stdin is not a tty (tcgetattr fails). */
3842 static void set_tty_echo(int on)
3846 if (tcgetattr(0, &tty) == 0) {
3847 if (on) tty.c_lflag |= ECHO;
3848 else tty.c_lflag &= ~ECHO;
3849 tcsetattr(0, TCSANOW, &tty);
/* Poll for an interactive key press (at most every 100ms) and act on it:
 * verbosity (+/-), QP histogram (s), filter commands (c/C), debug modes
 * (d/D), help (?). Returns AVERROR_EXIT on quit/signal, 0 otherwise.
 * NOTE(review): several guard lines and the final return are elided in
 * this excerpt. */
3854 static int check_keyboard_interaction(int64_t cur_time)
3857 static int64_t last_time;
3858 if (received_nb_signals)
3859 return AVERROR_EXIT;
3860 /* read_key() returns 0 on EOF */
/* Rate-limit key polling to once per 100ms; never poll in daemon mode. */
3861 if(cur_time - last_time >= 100000 && !run_as_daemon){
3863 last_time = cur_time;
3867 return AVERROR_EXIT;
3868 if (key == '+') av_log_set_level(av_log_get_level()+10);
3869 if (key == '-') av_log_set_level(av_log_get_level()-10);
3870 if (key == 's') qp_hist ^= 1;
3873 do_hex_dump = do_pkt_dump = 0;
3874 } else if(do_pkt_dump){
3878 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line from the terminal and send it to
 * matching filtergraphs ('c' = first matching filter only). */
3880 if (key == 'c' || key == 'C'){
3881 char buf[4096], target[64], command[256], arg[256] = {0};
3884 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3887 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3892 fprintf(stderr, "\n");
3894 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3895 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3896 target, time, command, arg);
3897 for (i = 0; i < nb_filtergraphs; i++) {
3898 FilterGraph *fg = filtergraphs[i];
3901 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3902 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3903 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3904 } else if (key == 'c') {
3905 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3906 ret = AVERROR_PATCHWELCOME;
3908 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3910 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3915 av_log(NULL, AV_LOG_ERROR,
3916 "Parse error, at least 3 arguments were expected, "
3917 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle to the next supported debug mode; 'D': prompt for a value. */
3920 if (key == 'd' || key == 'D'){
3923 debug = input_streams[0]->st->codec->debug<<1;
3924 if(!debug) debug = 1;
3925 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3932 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3937 fprintf(stderr, "\n");
3938 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3939 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to all decoder and encoder contexts. */
3941 for(i=0;i<nb_input_streams;i++) {
3942 input_streams[i]->st->codec->debug = debug;
3944 for(i=0;i<nb_output_streams;i++) {
3945 OutputStream *ost = output_streams[i];
3946 ost->enc_ctx->debug = debug;
3948 if(debug) av_log_set_level(AV_LOG_DEBUG);
3949 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key, presumably — guard line elided): print help. */
3952 fprintf(stderr, "key function\n"
3953 "? show this help\n"
3954 "+ increase verbosity\n"
3955 "- decrease verbosity\n"
3956 "c Send command to first matching filter supporting it\n"
3957 "C Send/Queue command to all matching filters\n"
3958 "D cycle through available debug modes\n"
3959 "h dump packets/hex press to cycle through the 3 states\n"
3961 "s Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs exist):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through the file's thread message queue. On a full non-blocking queue it
 * retries once in blocking mode and warns about thread_queue_size. On
 * read/send error it propagates the error to the receiving side.
 * NOTE(review): loop structure and some branches are elided here. */
3968 static void *input_thread(void *arg)
3971 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3976 ret = av_read_frame(f->ctx, &pkt);
3978 if (ret == AVERROR(EAGAIN)) {
/* Read failed (EOF or error): tell the receiver and stop. */
3983 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3986 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: retry blocking and nag the user. */
3987 if (flags && ret == AVERROR(EAGAIN)) {
3989 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3990 av_log(f->ctx, AV_LOG_WARNING,
3991 "Thread message queue blocking; consider raising the "
3992 "thread_queue_size option (current value: %d)\n",
3993 f->thread_queue_size);
3996 if (ret != AVERROR_EOF)
3997 av_log(f->ctx, AV_LOG_ERROR,
3998 "Unable to send packet to main thread: %s\n",
/* Send failed: the packet was not consumed — free it ourselves. */
4000 av_packet_unref(&pkt);
4001 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all input demuxer threads: signal EOF to the sender side,
 * drain and unref any queued packets, join each thread, then free the
 * message queue. Safe to call when no queues were ever created. */
4009 static void free_input_threads(void)
4013 for (i = 0; i < nb_input_files; i++) {
4014 InputFile *f = input_files[i];
4017 if (!f || !f->in_thread_queue)
4019 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain leftover packets so their buffers are released. */
4020 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4021 av_packet_unref(&pkt);
4023 pthread_join(f->thread, NULL);
4025 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (skipped entirely when there is
 * only a single input). Each file gets a message queue of thread_queue_size
 * AVPackets; non-seekable non-lavfi inputs are read in non-blocking mode.
 * Returns 0 on success or a negative AVERROR. */
4029 static int init_input_threads(void)
4033 if (nb_input_files == 1)
4036 for (i = 0; i < nb_input_files; i++) {
4037 InputFile *f = input_files[i];
/* Non-seekable real-time-ish inputs must not block the reader. */
4039 if (f->ctx->pb ? !f->ctx->pb->seekable :
4040 strcmp(f->ctx->iformat->name, "lavfi"))
4041 f->non_blocking = 1;
4042 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4043 f->thread_queue_size, sizeof(AVPacket));
4047 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4048 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4049 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno-style code; wrap it. */
4050 return AVERROR(ret);
/* Fetch the next packet for this file from its demuxer-thread queue,
 * non-blocking when the file is flagged non_blocking (condition line
 * elided in this excerpt). */
4056 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4058 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4060 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file. Under -re rate emulation,
 * returns EAGAIN while a stream's DTS is ahead of wall-clock time since
 * start. With multiple inputs the threaded queue is used, otherwise
 * av_read_frame() directly. */
4064 static int get_input_packet(InputFile *f, AVPacket *pkt)
4068 for (i = 0; i < f->nb_streams; i++) {
4069 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (µs) against elapsed real time for pacing. */
4070 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4071 int64_t now = av_gettime_relative() - ist->start;
4073 return AVERROR(EAGAIN);
4078 if (nb_input_files > 1)
4079 return get_input_packet_mt(f, pkt);
4081 return av_read_frame(f->ctx, pkt);
/* True if any output stream is currently marked unavailable (EAGAIN seen).
 * NOTE(review): the return statements are elided in this excerpt. */
4084 static int got_eagain(void)
4087 for (i = 0; i < nb_output_streams; i++)
4088 if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN markers: per-input-file eagain flags and
 * per-output-stream unavailable flags, so the next loop iteration retries
 * everything. */
4093 static void reset_eagain(void)
4096 for (i = 0; i < nb_input_files; i++)
4097 input_files[i]->eagain = 0;
4098 for (i = 0; i < nb_output_streams; i++)
4099 output_streams[i]->unavailable = 0;
4102 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Keep *duration at max(tmp, *duration), comparing across differing time
 * bases with av_compare_ts(); returns the time base of whichever value is
 * kept. NOTE(review): the branch updating *duration is elided here. */
4103 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4104 AVRational time_base)
4110 return tmp_time_base;
4113 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4116 return tmp_time_base;
/* For -stream_loop: seek the input back to its start, flush all decoders,
 * and update ifile->duration/time_base with this pass's duration so the
 * looped timestamps can be offset correctly on the next pass.
 * NOTE(review): several lines (loop-counter decrement, return) are elided
 * in this excerpt. */
4122 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4125 AVCodecContext *avctx;
4126 int i, ret, has_audio = 0;
4127 int64_t duration = 0;
4129 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and note whether decoded audio exists. */
4133 for (i = 0; i < ifile->nb_streams; i++) {
4134 ist = input_streams[ifile->ist_index + i];
4135 avctx = ist->dec_ctx;
4138 if (ist->decoding_needed) {
4139 process_input_packet(ist, NULL, 1);
4140 avcodec_flush_buffers(avctx);
4143 /* duration is the length of the last frame in a stream
4144 * when audio stream is present we don't care about
4145 * last video frame length because it's not defined exactly */
4146 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's total duration (last-frame length
 * plus max_pts - min_pts) and fold it into the file-level maximum. */
4150 for (i = 0; i < ifile->nb_streams; i++) {
4151 ist = input_streams[ifile->ist_index + i];
4152 avctx = ist->dec_ctx;
4155 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4156 AVRational sample_rate = {1, avctx->sample_rate};
4158 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (presumably — guard elided): estimate one frame's duration
 * from the configured or average frame rate, else fall back to 1. */
4162 if (ist->framerate.num) {
4163 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4164 } else if (ist->st->avg_frame_rate.num) {
4165 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4166 } else duration = 1;
4168 if (!ifile->duration)
4169 ifile->time_base = ist->st->time_base;
4170 /* the total duration of the stream, max_pts - min_pts is
4171 * the duration of the stream without the last frame */
4172 duration += ist->max_pts - ist->min_pts;
4173 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4177 if (ifile->loop > 0)
4185 * - 0 -- one packet was read and processed
4186 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4187 * this function should be called again
4188 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file: handle EOF and
 * looping, timestamp wrap correction, stream-global side data injection,
 * ts_offset/ts_scale application, loop-duration offsetting, and DTS/PTS
 * discontinuity detection, then hand the packet to process_input_packet().
 * Returns 0, AVERROR(EAGAIN) to retry, or AVERROR_EOF.
 * NOTE(review): many original lines (returns, some guards, braces) are
 * elided in this excerpt. */
4190 static int process_input(int file_index)
4192 InputFile *ifile = input_files[file_index];
4193 AVFormatContext *is;
4201 ret = get_input_packet(ifile, &pkt);
4203 if (ret == AVERROR(EAGAIN)) {
/* On read failure with -stream_loop pending: seek back and retry. */
4207 if (ret < 0 && ifile->loop) {
4208 if ((ret = seek_to_start(ifile, is)) < 0)
4210 ret = get_input_packet(ifile, &pkt);
4211 if (ret == AVERROR(EAGAIN)) {
4217 if (ret != AVERROR_EOF) {
4218 print_error(is->filename, ret);
/* EOF path: flush decoders and finish non-lavfi outputs of this file. */
4223 for (i = 0; i < ifile->nb_streams; i++) {
4224 ist = input_streams[ifile->ist_index + i];
4225 if (ist->decoding_needed) {
4226 ret = process_input_packet(ist, NULL, 0);
4231 /* mark all outputs that don't go through lavfi as finished */
4232 for (j = 0; j < nb_output_streams; j++) {
4233 OutputStream *ost = output_streams[j];
4235 if (ost->source_index == ifile->ist_index + i &&
4236 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4237 finish_output_stream(ost);
4241 ifile->eof_reached = 1;
4242 return AVERROR(EAGAIN);
4248 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4249 is->streams[pkt.stream_index]);
4251 /* the following test is needed in case new streams appear
4252 dynamically in stream : we ignore them */
4253 if (pkt.stream_index >= ifile->nb_streams) {
4254 report_new_stream(file_index, &pkt);
4255 goto discard_packet;
4258 ist = input_streams[ifile->ist_index + pkt.stream_index];
4260 ist->data_size += pkt.size;
4264 goto discard_packet;
4266 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4267 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose-debug dump of the raw demuxed timestamps. */
4272 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4273 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4274 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4275 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4276 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4277 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4278 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4279 av_ts2str(input_files[ist->file_index]->ts_offset),
4280 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for formats with < 64 pts_wrap_bits. */
4283 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4284 int64_t stime, stime2;
4285 // Correcting starttime based on the enabled streams
4286 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4287 // so we instead do it here as part of discontinuity handling
4288 if ( ist->next_dts == AV_NOPTS_VALUE
4289 && ifile->ts_offset == -is->start_time
4290 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4291 int64_t new_start_time = INT64_MAX;
4292 for (i=0; i<is->nb_streams; i++) {
4293 AVStream *st = is->streams[i];
4294 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4296 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4298 if (new_start_time > is->start_time) {
4299 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4300 ifile->ts_offset = -new_start_time;
4304 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4305 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4306 ist->wrap_correction_done = 1;
/* Unwrap DTS/PTS that jumped past the wrap point; keep correction
 * pending (done = 0) if a wrapped value was seen. */
4308 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4309 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4310 ist->wrap_correction_done = 0;
4312 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4313 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4314 ist->wrap_correction_done = 0;
4318 /* add the stream-global side data to the first packet */
4319 if (ist->nb_packets == 1) {
4320 for (i = 0; i < ist->st->nb_side_data; i++) {
4321 AVPacketSideData *src_sd = &ist->st->side_data[i];
4324 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4327 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4330 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4334 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the file-level ts_offset, then the per-stream ts_scale. */
4338 if (pkt.dts != AV_NOPTS_VALUE)
4339 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4340 if (pkt.pts != AV_NOPTS_VALUE)
4341 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4343 if (pkt.pts != AV_NOPTS_VALUE)
4344 pkt.pts *= ist->ts_scale;
4345 if (pkt.dts != AV_NOPTS_VALUE)
4346 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check on the first DTS of a stream. */
4348 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4349 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4350 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4351 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4352 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4353 int64_t delta = pkt_dts - ifile->last_ts;
4354 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4355 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4356 ifile->ts_offset -= delta;
4357 av_log(NULL, AV_LOG_DEBUG,
4358 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4359 delta, ifile->ts_offset);
4360 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4361 if (pkt.pts != AV_NOPTS_VALUE)
4362 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset timestamps by the accumulated loop duration (-stream_loop). */
4366 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4367 if (pkt.pts != AV_NOPTS_VALUE) {
4368 pkt.pts += duration;
4369 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4370 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4373 if (pkt.dts != AV_NOPTS_VALUE)
4374 pkt.dts += duration;
/* Intra-stream discontinuity handling against the predicted next_dts. */
4376 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4377 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4378 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4379 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4381 int64_t delta = pkt_dts - ist->next_dts;
4382 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4383 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4384 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4385 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4386 ifile->ts_offset -= delta;
4387 av_log(NULL, AV_LOG_DEBUG,
4388 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4389 delta, ifile->ts_offset);
4390 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4391 if (pkt.pts != AV_NOPTS_VALUE)
4392 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-TS_DISCONT formats (presumably — else-branch line elided):
 * drop wildly out-of-range DTS/PTS instead of re-offsetting. */
4395 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4396 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4397 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4398 pkt.dts = AV_NOPTS_VALUE;
4400 if (pkt.pts != AV_NOPTS_VALUE){
4401 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4402 delta = pkt_pts - ist->next_dts;
4403 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4404 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4405 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4406 pkt.pts = AV_NOPTS_VALUE;
4412 if (pkt.dts != AV_NOPTS_VALUE)
4413 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4416 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4417 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4418 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4419 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4420 av_ts2str(input_files[ist->file_index]->ts_offset),
4421 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4424 sub2video_heartbeat(ist, pkt.pts);
4426 process_input_packet(ist, &pkt, 0);
4429 av_packet_unref(&pkt);
4435 * Perform a step of transcoding for the specified filter graph.
4437 * @param[in] graph filter graph to consider
4438 * @param[out] best_ist input stream where a frame would allow to continue
4439 * @return 0 for success, <0 for error
/* Drive one step of a filtergraph: request the oldest frame; on success
 * reap filtered frames, on EOF reap and close all graph outputs, on EAGAIN
 * pick the input with the most failed buffer requests as *best_ist so the
 * caller can feed it. If no input can be fed (all at EOF/EAGAIN), mark
 * every graph output unavailable.
 * NOTE(review): some guard/return lines are elided in this excerpt. */
4441 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4444 int nb_requests, nb_requests_max = 0;
4445 InputFilter *ifilter;
4449 ret = avfilter_graph_request_oldest(graph->graph);
4451 return reap_filters(0);
4453 if (ret == AVERROR_EOF) {
4454 ret = reap_filters(1);
4455 for (i = 0; i < graph->nb_outputs; i++)
4456 close_output_stream(graph->outputs[i]->ost);
4459 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the graph input most starved for frames. */
4462 for (i = 0; i < graph->nb_inputs; i++) {
4463 ifilter = graph->inputs[i];
4465 if (input_files[ist->file_index]->eagain ||
4466 input_files[ist->file_index]->eof_reached)
4468 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4469 if (nb_requests > nb_requests_max) {
4470 nb_requests_max = nb_requests;
/* No feedable input found: stall all outputs of this graph. */
4476 for (i = 0; i < graph->nb_outputs; i++)
4477 graph->outputs[i]->ost->unavailable = 1;
4483 * Run a single step of transcoding.
4485 * @return 0 for success, <0 for error
/* One iteration of the transcode loop: choose an output stream, (re)configure
 * its filtergraph when all input formats are known, find the input stream to
 * feed (via the filtergraph or directly), read/process one input packet, and
 * reap any filtered output. Returns 0 on success, <0 on error.
 * NOTE(review): several guard/return lines are elided in this excerpt. */
4487 static int transcode_step(void)
4490 InputStream *ist = NULL;
4493 ost = choose_output();
4500 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtergraph exists but is not configured yet: configure it once all
 * of its inputs know their formats. */
4504 if (ost->filter && !ost->filter->graph->graph) {
4505 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4506 ret = configure_filtergraph(ost->filter->graph);
4508 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4514 if (ost->filter && ost->filter->graph->graph) {
4515 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4519 } else if (ost->filter) {
/* Unconfigured graph: feed any input that has produced no output yet
 * and whose file is not at EOF; otherwise mark inputs done. */
4521 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4522 InputFilter *ifilter = ost->filter->graph->inputs[i];
4523 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4529 ost->inputs_done = 1;
/* No filtergraph: read directly from the bound source stream. */
4533 av_assert0(ost->source_index >= 0);
4534 ist = input_streams[ost->source_index];
4537 ret = process_input(ist->file_index);
4538 if (ret == AVERROR(EAGAIN)) {
4539 if (input_files[ist->file_index]->eagain)
4540 ost->unavailable = 1;
4545 return ret == AVERROR_EOF ? 0 : ret;
4547 return reap_filters(0);
4551 * The following code is the main loop of the file converter
/* Main converter loop: initialize, run transcode_step() until no output is
 * needed or a signal arrives, then flush decoders, write trailers, close
 * encoders/decoders, and free per-stream resources. Returns 0 on success
 * or a negative AVERROR. NOTE(review): error-handling branches, labels and
 * the final return are elided in this excerpt. */
4553 static int transcode(void)
4556 AVFormatContext *os;
4559 int64_t timer_start;
4560 int64_t total_packets_written = 0;
4562 ret = transcode_init();
4566 if (stdin_interaction) {
4567 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4570 timer_start = av_gettime_relative();
4573 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM, 'q', or all outputs are finished. */
4577 while (!received_sigterm) {
4578 int64_t cur_time= av_gettime_relative();
4580 /* if 'q' pressed, exits */
4581 if (stdin_interaction)
4582 if (check_keyboard_interaction(cur_time) < 0)
4585 /* check if there's any stream where output is still needed */
4586 if (!need_output()) {
4587 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4591 ret = transcode_step();
4592 if (ret < 0 && ret != AVERROR_EOF) {
4594 av_strerror(ret, errbuf, sizeof(errbuf));
4596 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4600 /* dump report by using the output first video and audio streams */
4601 print_report(0, timer_start, cur_time);
4604 free_input_threads();
4607 /* at the end of stream, we must flush the decoder buffers */
4608 for (i = 0; i < nb_input_streams; i++) {
4609 ist = input_streams[i];
4610 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4611 process_input_packet(ist, NULL, 0);
4618 /* write the trailer if needed and close file */
4619 for (i = 0; i < nb_output_files; i++) {
4620 os = output_files[i]->ctx;
4621 if (!output_files[i]->header_written) {
4622 av_log(NULL, AV_LOG_ERROR,
4623 "Nothing was written into output file %d (%s), because "
4624 "at least one of its streams received no packets.\n",
4628 if ((ret = av_write_trailer(os)) < 0) {
4629 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4635 /* dump report by using the first video and audio streams */
4636 print_report(1, timer_start, av_gettime_relative());
4638 /* close each encoder */
4639 for (i = 0; i < nb_output_streams; i++) {
4640 ost = output_streams[i];
4641 if (ost->encoding_needed) {
4642 av_freep(&ost->enc_ctx->stats_in);
4644 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4647 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4648 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4652 /* close each decoder */
4653 for (i = 0; i < nb_input_streams; i++) {
4654 ist = input_streams[i];
4655 if (ist->decoding_needed) {
4656 avcodec_close(ist->dec_ctx);
4657 if (ist->hwaccel_uninit)
4658 ist->hwaccel_uninit(ist->dec_ctx);
4662 av_buffer_unref(&hw_device_ctx);
4663 hw_device_free_all();
/* Cleanup path (label elided): joins input threads and frees the
 * per-output-stream auxiliary allocations. */
4670 free_input_threads();
4673 if (output_streams) {
4674 for (i = 0; i < nb_output_streams; i++) {
4675 ost = output_streams[i];
4678 if (fclose(ost->logfile))
4679 av_log(NULL, AV_LOG_ERROR,
4680 "Error closing logfile, loss of information possible: %s\n",
4681 av_err2str(AVERROR(errno)));
4682 ost->logfile = NULL;
4684 av_freep(&ost->forced_kf_pts);
4685 av_freep(&ost->apad);
4686 av_freep(&ost->disposition);
4687 av_dict_free(&ost->encoder_opts);
4688 av_dict_free(&ost->sws_dict);
4689 av_dict_free(&ost->swr_opts);
4690 av_dict_free(&ost->resample_opts);
/* Return this process's consumed user CPU time in microseconds, using
 * getrusage() on POSIX or GetProcessTimes() on Windows; falls back to
 * wall-clock time when neither is available. */
4698 static int64_t getutime(void)
4701 struct rusage rusage;
4703 getrusage(RUSAGE_SELF, &rusage);
4704 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4705 #elif HAVE_GETPROCESSTIMES
4707 FILETIME c, e, k, u;
4708 proc = GetCurrentProcess();
4709 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
4710 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4712 return av_gettime_relative();
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes; convert to bytes */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    /* peak RSS not queryable on this platform */
    return 0;
#endif
}
/* av_log callback that discards all messages; installed in daemon (-d)
 * mode where nothing should be written to the controlling terminal. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4738 int main(int argc, char **argv)
4745 register_exit(ffmpeg_cleanup);
4747 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4749 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4750 parse_loglevel(argc, argv, options);
4752 if(argc>1 && !strcmp(argv[1], "-d")){
4754 av_log_set_callback(log_callback_null);
4759 avcodec_register_all();
4761 avdevice_register_all();
4763 avfilter_register_all();
4765 avformat_network_init();
4767 show_banner(argc, argv, options);
4769 /* parse options and open all input/output files */
4770 ret = ffmpeg_parse_options(argc, argv);
4774 if (nb_output_files <= 0 && nb_input_files == 0) {
4776 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4780 /* file converter / grab */
4781 if (nb_output_files <= 0) {
4782 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4786 // if (nb_input_files == 0) {
4787 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4791 for (i = 0; i < nb_output_files; i++) {
4792 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4796 current_time = ti = getutime();
4797 if (transcode() < 0)
4799 ti = getutime() - ti;
4801 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4803 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4804 decode_error_stat[0], decode_error_stat[1]);
4805 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4808 exit_program(received_nb_signals ? 255 : main_return_code);
4809 return main_return_code;