2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identification consumed by cmdutils for banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Handle for the -vstats statistics file; opened lazily in do_video_stats(). */
static FILE *vstats_file;

/* Names of the constants usable in -force_key_frames expressions
 * (array initializer continues in elided lines). */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;
static int nb_frames_dup = 0;   /* frames duplicated for A/V sync (reported in stats) */
static int nb_frames_drop = 0;  /* frames dropped for A/V sync (reported in stats) */
static int64_t decode_error_stat[2];

static int current_time;
AVIOContext *progress_avio = NULL;

/* Shared scratch buffer for encoded subtitles, allocated in do_subtitle_out(). */
static uint8_t *subtitle_out;

/* Global stream/file tables, also referenced from the option-parsing code. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;  /* terminal state saved before raw mode */
static int restore_tty;        /* nonzero once oldtty must be restored */

static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame, sized from the
 * subtitle decoder's geometry when available, otherwise from the
 * preconfigured sub2video dimensions.  Returns 0 on success or a negative
 * AVERROR from av_frame_get_buffer(). */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer the decoder's reported size; fall back to sub2video defaults. */
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    /* 32-byte alignment keeps downstream SIMD consumers happy. */
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* Zero-fill the canvas (all-zero RGB32 reads as fully transparent). */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas described by
 * dst/dst_linesize of size w x h.  The rectangle r carries PAL8 pixels
 * (r->data[0]) and their palette (r->data[1]); invalid rectangles are
 * rejected with a warning. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    /* Only bitmap subtitles can be rasterized here. */
    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    /* Reject rectangles that do not lie fully inside the canvas. */
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
    /* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
    dst += r->y * dst_linesize + r->x * 4;
    pal = (uint32_t *)r->data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* Expand each palette index to its RGB32 value. */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->linesize[0];
/* Stamp the current sub2video canvas with `pts` and push a reference to it
 * into every filtergraph input fed by this stream. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        /* KEEP_REF: the canvas is reused for later heartbeats;
         * PUSH: run the graph immediately so waiting filters unblock. */
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded subtitle onto a fresh blank canvas and push the result
 * into the filtergraphs.  With sub == NULL the canvas stays blank, which
 * clears any previously displayed subtitle. */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

        /* Convert the display window from AV_TIME_BASE units (display times
           are in milliseconds, hence the * 1000LL) to the stream time base. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
        /* sub == NULL: reuse the previous end time as the blank frame's pts. */
        pts = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video canvas to every sub2video stream of the
 * same input file, so filters that consume the subtitle input do not stall
 * while video frames keep arriving. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        /* Skip streams that are not sub2video sources. */
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* Refresh the canvas if the displayed subtitle expired (or none yet). */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* Count outstanding frame requests from this stream's buffer sources;
           only push when some filter actually asked and was refused. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video source: clear any subtitle still
 * on screen, then send EOF (a NULL frame) to every connected buffer source. */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);

/* end of sub2video hack */
/* Restore the saved terminal attributes.  Kept async-signal-safe so it can
 * run from signal handlers (tcsetattr is async-signal-safe per POSIX). */
static void term_exit_sigsafe(void)
        tcsetattr (0, TCSANOW, &oldtty);
    /* Emit an empty message; presumably used to reset av_log's line state
       (e.g. a pending progress line) -- NOTE(review): confirm intent. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Shutdown bookkeeping.  The first four are written from signal handlers,
 * hence volatile; main_return_code is only touched on the main path. */
static volatile int received_sigterm = 0;     /* last terminating signal number */
static volatile int received_nb_signals = 0;  /* how many signals arrived */
static volatile int transcode_init_done = 0;  /* set once setup has finished */
static volatile int ffmpeg_exited = 0;        /* set when cleanup completed */
static int main_return_code = 0;              /* process exit status override */
/* Signal handler: record which signal arrived and how many times; after
 * more than three signals give up on graceful shutdown. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        /* write() is async-signal-safe, unlike av_log/stdio. */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signal path used on other platforms. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* NOTE(review): interior of the terminal-init routine; its header lies
       outside this excerpt.  Puts stdin into a raw-ish mode so single
       keypresses can be read, after saving the previous state (oldtty)
       for restoration at exit. */
    if (tcgetattr (0, &tty) == 0) {
        /* Disable input translations and software flow control... */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        /* ...turn off echo and canonical (line-buffered) input... */
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        /* ...and clear character-size/parity bits before re-setting them. */
        tty.c_cflag &= ~(CSIZE|PARENB);
        tcsetattr (0, TCSANOW, &tty);
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    signal(SIGXCPU, sigterm_handler); /* CPU-time limit exceeded. */
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* POSIX path polls stdin via select(); Windows path uses console APIs or
   PeekNamedPipe when stdin is a pipe. */
static int read_key(void)
    n = select(1, &rfds, NULL, NULL, &tv);
# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode() fails when stdin is a pipe, not a console. */
        is_pipe = !GetConsoleMode(input_handle, &dw);

        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once the user has signalled.
 * Before init completes (transcode_init_done == 0) a single signal aborts;
 * afterwards one signal is tolerated for graceful shutdown. */
static int decode_interrupt_cb(void *ctx)
    return received_nb_signals > transcode_init_done;

/* Interrupt callback handed to every AVFormatContext we open. */
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, run at program exit: free all filtergraphs, output
 * files/streams, input files/streams and auxiliary buffers, then report
 * how the run ended.  `ret` is the pending process exit status. */
static void ffmpeg_cleanup(int ret)
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* Filtergraphs: graph itself plus the per-input/-output wrappers. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* Output files: close the file-backed AVIO (for muxers that own one),
       free the muxer context and its creation-time option dictionary. */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);
    /* Output streams: bitstream filter chain, cached frames, parser,
       forced-keyframe state, channel map and the encoder context. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;
        bsfc = ost->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    /* Stop demuxer threads before tearing down the inputs they feed. */
    free_input_threads();
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
        /* fclose() flushes buffered statistics; report failure loudly. */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    /* Final status line: signal-triggered exit vs. conversion failure. */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options that another component already consumed. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
    AVDictionaryEntry *t = NULL;

    /* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail fatally if any option in m was left unconsumed (i.e. the user gave
 * an option no component recognized). */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report use of an experimental en/decoder and abort (body elided here). */
static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time consumed since the previous call,
 * labelled with the printf-style description.  Callers also invoke this
 * with fmt == NULL, presumably just to reset the reference time --
 * NOTE(review): the NULL-fmt guard is in elided lines. */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
            vsnprintf(buf, sizeof(buf), fmt, va);
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* OR `this_stream` into ost's finished state and `others` into every other
 * output stream's -- used e.g. when the muxer fails and all encoding for
 * the run must wind down. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: propagate extradata, honor sync modes
 * and frame limits, run bitstream filters, sanitize DTS/PTS, then hand the
 * packet to av_interleaved_write_frame().  Consumes pkt's payload. */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    /* Stream-copy uses the muxer-side context; encoding uses enc_ctx. */
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* Copy encoder extradata to the muxer-visible context exactly once. */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
    /* In vsync-drop / negative async modes, timestamps are discarded and
       left for the muxer to regenerate. */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Quality/PSNR stats the encoder attached as packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
        /* For CFR output the duration is dictated by the frame rate. */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
    av_packet_split_side_data(pkt);

    if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
        print_error("", ret);
    /* Empty packets with no side data carry nothing worth muxing. */
    if (pkt->size == 0 && pkt->side_data_elems == 0)
    /* Bitstream filters/parsers may surface new extradata: mirror it. */
    if (!ost->st->codecpar->extradata && avctx->extradata) {
        ost->st->codecpar->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!ost->st->codecpar->extradata) {
            av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
        ost->st->codecpar->extradata_size = avctx->extradata_size;
        memcpy(ost->st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* Replace dts with the median of {pts, dts, last_mux_dts+1}:
               sum minus min minus max yields the middle value. */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        /* Enforce increasing DTS (strictly, unless the muxer sets
           AVFMT_TS_NONSTRICT which permits equal timestamps). */
        if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                /* Keep pts >= dts after clamping dts forward. */
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    /* Accounting for the final summary/-stats output. */
    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        /* Muxer failure: record it and stop all streams of this run. */
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/* Mark this stream's encoder as finished and clamp the owning file's
 * recording time so sibling streams stop at the same point. */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        /* Current position converted from encoder tb to AV_TIME_BASE. */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/* Check the -t recording limit for this stream: once the stream position
 * (sync_opts relative to first_pts, in encoder time base) reaches the
 * file's recording_time, close the stream.  (Return statements elided in
 * this excerpt; callers treat a zero return as "stop encoding".) */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet.
 * With a missing pts or negative -async, timestamps are generated from the
 * running sample counter kept in ost->sync_opts. */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))

    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    /* Next frame begins right after this one, counted in samples. */
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* Encoder emits timestamps in enc tb; convert for the muxer. */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

        write_frame(s, &pkt, ost);
/* Encode an AVSubtitle and mux it.  DVB subtitles are encoded twice: one
 * packet draws the subtitle, the second erases it (nb controls the pass
 * count; its assignment is in elided lines). */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        /* Lazily allocate the shared 1 MiB encode buffer. */
        subtitle_out = av_malloc(subtitle_out_max_size);
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* num_rects may be altered per-pass (elided); save it to restore. */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        /* Display times are in milliseconds, hence the 1/1000 rational. */
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* 90 kHz units: draw packet at start, clear packet at end. */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;
        write_frame(s, &pkt, ost);
/* Emit `next_picture` (or flush with NULL) on a video output stream,
 * duplicating or dropping frames as dictated by the selected vsync mode.
 * `sync_ipts` (from an elided line) is the frame's ideal position in
 * encoder time-base units; ost->sync_opts is the actual output counter. */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Nominal duration of one output frame, in encoder time-base units. */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* Without custom filters, trust the source packet duration if usable. */
    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

        /* Flush path (condition elided): predict the dup count from the
           median of the three most recent values. */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);

    delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
    delta = delta0 + duration;

    /* by default, we output a single frame */
    nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)

    /* Resolve VSYNC_AUTO to a concrete mode based on the muxer's flags. */
    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            /* Single-stream input with no ts offset can use start-time
               adjusted CFR (VSCFR). */
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;
    ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* Late frame (negative drift) handling for rate-converting modes. */
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ost->sync_opts;

    /* Per-mode decision: how many frames to emit (nb_frames) and how many
       of those duplicate the previous frame (nb0_frames). */
    switch (format_video_sync) {
        if (ost->frame_number == 0 && delta0 >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            ost->sync_opts = lrint(sync_ipts);
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);
    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    /* Never exceed the -frames limit for this stream. */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* Shift the dup-count history window and record this frame's value. */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        /* Refuse absurd duplication counts caused by broken timestamps. */
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* First emit duplicates of the previous frame, then the new one. */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

#if FF_API_LAVF_FMT_RAWPICTURE
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            /* Honor -top for interlaced encoding. */
            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                /* MJPEG stores fields as separate pictures (TT/BB). */
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;

            /* Keyframe forcing: explicit -force_key_frames times, an
               evaluated expression, or "source" (copy input keyframes). */
            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    /* Record this force point for the next evaluation. */
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                /* Non-delaying encoders may omit pts; fill from counter. */
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);

    /* Keep a reference to the emitted frame for future duplication. */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
/* Convert a normalized mean-squared-error ratio into PSNR in decibels. */
static double psnr(double d)
    return -10.0 * log10(d);
/* Append one line of per-frame statistics (quality, PSNR, sizes, bitrates)
 * to the -vstats file, opening it on first use. */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* Quality is stored as lambda; convert back to a QP-like value. */
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
        /* error[0] holds the summed squared error; normalize per pixel. */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        /* Instantaneous bitrate of this frame and running average. */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark `ost` as fully finished (both encoder and muxer side).
 * NOTE(review): the loop below appears to also finish every other stream
 * of the same output file — presumably guarded by a shutdown condition on
 * a line not visible here; confirm against the full source.
 */
1275 static void finish_output_stream(OutputStream *ost)
1277 OutputFile *of = output_files[ost->file_index];
1280 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1283 for (i = 0; i < of->ctx->nb_streams; i++)
1284 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1289 * Get and encode new output from any of the filtergraphs, without causing
1292 * @return 0 for success, <0 for severe errors
/*
 * Drain every output stream's buffersink without requesting new frames
 * from the filtergraphs, and hand the filtered frames to the video/audio
 * encoders.  With `flush` set, an EOF from a video sink still triggers a
 * final do_video_out(NULL) call so the encoder pipeline is flushed.
 *
 * @param flush non-zero when called during end-of-stream flushing
 * @return 0 for success, <0 (e.g. AVERROR(ENOMEM)) for severe errors
 */
1294 static int reap_filters(int flush)
1296 AVFrame *filtered_frame = NULL;
1299 /* Reap all buffers present in the buffer sinks */
1300 for (i = 0; i < nb_output_streams; i++) {
1301 OutputStream *ost = output_streams[i];
1302 OutputFile *of = output_files[ost->file_index];
1303 AVFilterContext *filter;
1304 AVCodecContext *enc = ost->enc_ctx;
1309 filter = ost->filter->filter;
/* lazily allocate the reusable frame used to pull from the sink */
1311 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1312 return AVERROR(ENOMEM);
1314 filtered_frame = ost->filtered_frame;
1317 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued; do not pull upstream */
1318 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1319 AV_BUFFERSINK_FLAG_NO_REQUEST);
1321 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1322 av_log(NULL, AV_LOG_WARNING,
1323 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1324 } else if (flush && ret == AVERROR_EOF) {
1325 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1326 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1330 if (ost->finished) {
1331 av_frame_unref(filtered_frame);
1334 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1335 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1336 AVRational tb = enc->time_base;
/* widen the timebase denominator to keep extra fractional precision
 * for float_pts (up to 16 extra bits) */
1337 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1339 tb.den <<= extra_bits;
1341 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1342 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1343 float_pts /= 1 << extra_bits;
1344 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1345 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* the integer pts is rescaled separately into the encoder timebase */
1347 filtered_frame->pts =
1348 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1349 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1351 //if (ost->source_index >= 0)
1352 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1354 switch (filter->inputs[0]->type) {
1355 case AVMEDIA_TYPE_VIDEO:
/* propagate the filter SAR unless the user forced an aspect ratio */
1356 if (!ost->frame_aspect_ratio.num)
1357 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1360 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1361 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1363 enc->time_base.num, enc->time_base.den);
1366 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1368 case AVMEDIA_TYPE_AUDIO:
/* encoders without PARAM_CHANGE cannot cope with a channel-count
 * change mid-stream coming out of the filtergraph */
1369 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1370 enc->channels != av_frame_get_channels(filtered_frame)) {
1371 av_log(NULL, AV_LOG_ERROR,
1372 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1375 do_audio_out(of->ctx, ost, filtered_frame);
1378 // TODO support subtitle filters
1382 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet
 * and byte counts.  Warns when nothing was encoded at all.
 *
 * @param total_size total number of bytes written to the output file(s)
 */
1389 static void print_final_stats(int64_t total_size)
1391 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1392 uint64_t subtitle_size = 0;
1393 uint64_t data_size = 0;
/* negative percent means "unknown" and is printed as such below */
1394 float percent = -1.0;
1398 for (i = 0; i < nb_output_streams; i++) {
1399 OutputStream *ost = output_streams[i];
1400 switch (ost->enc_ctx->codec_type) {
1401 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1402 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1403 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1404 default: other_size += ost->data_size; break;
1406 extra_size += ost->enc_ctx->extradata_size;
1407 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with deprecated CODEC_FLAG_PASS2;
 * both spellings have the same value, but the new names should be used
 * consistently */
1408 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1409 != AV_CODEC_FLAG_PASS1)
1413 if (data_size && total_size>0 && total_size >= data_size)
1414 percent = 100.0 * (total_size - data_size) / data_size;
1416 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1417 video_size / 1024.0,
1418 audio_size / 1024.0,
1419 subtitle_size / 1024.0,
1420 other_size / 1024.0,
1421 extra_size / 1024.0);
1423 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1425 av_log(NULL, AV_LOG_INFO, "unknown");
1426 av_log(NULL, AV_LOG_INFO, "\n");
1428 /* print verbose per-stream stats */
1429 for (i = 0; i < nb_input_files; i++) {
1430 InputFile *f = input_files[i];
/* shadows the parameter intentionally: per-file running totals */
1431 uint64_t total_packets = 0, total_size = 0;
1433 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1434 i, f->ctx->filename);
1436 for (j = 0; j < f->nb_streams; j++) {
1437 InputStream *ist = input_streams[f->ist_index + j];
1438 enum AVMediaType type = ist->dec_ctx->codec_type;
1440 total_size += ist->data_size;
1441 total_packets += ist->nb_packets;
1443 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1444 i, j, media_type_string(type));
1445 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1446 ist->nb_packets, ist->data_size);
1448 if (ist->decoding_needed) {
1449 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1450 ist->frames_decoded);
1451 if (type == AVMEDIA_TYPE_AUDIO)
1452 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1453 av_log(NULL, AV_LOG_VERBOSE, "; ");
1456 av_log(NULL, AV_LOG_VERBOSE, "\n");
1459 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1460 total_packets, total_size);
1463 for (i = 0; i < nb_output_files; i++) {
1464 OutputFile *of = output_files[i];
1465 uint64_t total_packets = 0, total_size = 0;
1467 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1468 i, of->ctx->filename);
1470 for (j = 0; j < of->ctx->nb_streams; j++) {
1471 OutputStream *ost = output_streams[of->ost_index + j];
1472 enum AVMediaType type = ost->enc_ctx->codec_type;
1474 total_size += ost->data_size;
1475 total_packets += ost->packets_written;
1477 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1478 i, j, media_type_string(type));
1479 if (ost->encoding_needed) {
1480 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1481 ost->frames_encoded);
1482 if (type == AVMEDIA_TYPE_AUDIO)
1483 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1484 av_log(NULL, AV_LOG_VERBOSE, "; ");
1487 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1488 ost->packets_written, ost->data_size);
1490 av_log(NULL, AV_LOG_VERBOSE, "\n");
1493 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1494 total_packets, total_size);
/* all-zero totals means the run produced no encoded data at all */
1496 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1497 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1499 av_log(NULL, AV_LOG_WARNING, "\n");
1501 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line ("frame= ... fps= ... time= ... bitrate=")
 * to stderr and, when -progress is active, a machine-readable key=value
 * report to progress_avio.  Rate-limited to one update per 500ms unless
 * this is the final report, which also triggers print_final_stats().
 *
 * @param is_last_report non-zero on the final call at end of transcode
 * @param timer_start    wallclock (us) when transcoding started
 * @param cur_time       current wallclock time in microseconds
 */
1506 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1509 AVBPrint buf_script;
1511 AVFormatContext *oc;
1513 AVCodecContext *enc;
1514 int frame_number, vid, i;
1517 int64_t pts = INT64_MIN + 1;
/* static: state persists across calls for the 500ms rate limiting */
1518 static int64_t last_time = -1;
1519 static int qp_histogram[52];
1520 int hours, mins, secs, us;
1524 if (!print_stats && !is_last_report && !progress_avio)
1527 if (!is_last_report) {
1528 if (last_time == -1) {
1529 last_time = cur_time;
1532 if ((cur_time - last_time) < 500000)
1534 last_time = cur_time;
1537 t = (cur_time-timer_start) / 1000000.0;
1540 oc = output_files[0]->ctx;
1542 total_size = avio_size(oc->pb);
1543 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1544 total_size = avio_tell(oc->pb);
1548 av_bprint_init(&buf_script, 0, 1);
1549 for (i = 0; i < nb_output_streams; i++) {
1551 ost = output_streams[i];
1553 if (!ost->stream_copy)
1554 q = ost->quality / (float) FF_QP2LAMBDA;
/* subsequent video streams after the first only report their quantizer */
1556 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1557 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1558 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1559 ost->file_index, ost->index, q);
/* first video stream drives frame/fps/q reporting */
1561 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1564 frame_number = ost->frame_number;
1565 fps = t > 1 ? frame_number / t : 0;
/* fps < 9.95 prints one decimal, otherwise none (precision via %.*f) */
1566 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1567 frame_number, fps < 9.95, fps, q);
1568 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1569 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1570 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1571 ost->file_index, ost->index, q);
1573 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-scaled QP histogram, one hex digit per bucket */
1577 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1579 for (j = 0; j < 32; j++)
1580 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1583 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1585 double error, error_sum = 0;
1586 double scale, scale_sum = 0;
1588 char type[3] = { 'Y','U','V' };
1589 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1590 for (j = 0; j < 3; j++) {
/* last report: cumulative error over all frames; otherwise per-frame */
1591 if (is_last_report) {
1592 error = enc->error[j];
1593 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1595 error = ost->error[j];
1596 scale = enc->width * enc->height * 255.0 * 255.0;
1602 p = psnr(error / scale);
1603 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lowercases the plane letter for the script keys */
1604 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1605 ost->file_index, ost->index, type[j] | 32, p);
1607 p = psnr(error_sum / scale_sum);
1608 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1609 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1610 ost->file_index, ost->index, p);
1614 /* compute min output value */
1615 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1616 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1617 ost->st->time_base, AV_TIME_BASE_Q));
1619 nb_frames_drop += ost->last_dropped;
1622 secs = FFABS(pts) / AV_TIME_BASE;
1623 us = FFABS(pts) % AV_TIME_BASE;
1629 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1630 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1632 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1634 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1635 "size=%8.0fkB time=", total_size / 1024.0);
1637 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1638 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1639 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1640 (100 * us) / AV_TIME_BASE);
1643 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1644 av_bprintf(&buf_script, "bitrate=N/A\n");
1646 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1647 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1650 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1651 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1652 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1653 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1654 hours, mins, secs, us);
1656 if (nb_frames_dup || nb_frames_drop)
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1658 nb_frames_dup, nb_frames_drop);
1659 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1660 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1663 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1664 av_bprintf(&buf_script, "speed=N/A\n");
1666 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1667 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1670 if (print_stats || is_last_report) {
/* '\r' keeps the status on one line; '\n' finalizes the last report */
1671 const char end = is_last_report ? '\n' : '\r';
1672 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1673 fprintf(stderr, "%s %c", buf, end);
1675 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1680 if (progress_avio) {
1681 av_bprintf(&buf_script, "progress=%s\n",
1682 is_last_report ? "end" : "continue");
1683 avio_write(progress_avio, buf_script.str,
1684 FFMIN(buf_script.len, buf_script.size - 1));
1685 avio_flush(progress_avio);
1686 av_bprint_finalize(&buf_script, NULL);
1687 if (is_last_report) {
1688 if ((ret = avio_closep(&progress_avio)) < 0)
1689 av_log(NULL, AV_LOG_ERROR,
1690 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1695 print_final_stats(total_size);
/*
 * Flush all active encoders at end of input: feed each encoder NULL frames
 * until it stops producing packets, writing every drained packet to its
 * muxer.  Streams that are copy-only or use <=1-sample raw audio encoders
 * are skipped.
 */
1698 static void flush_encoders(void)
1702 for (i = 0; i < nb_output_streams; i++) {
1703 OutputStream *ost = output_streams[i];
1704 AVCodecContext *enc = ost->enc_ctx;
1705 AVFormatContext *os = output_files[ost->file_index]->ctx;
1706 int stop_encoding = 0;
1708 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders have no delay, nothing to flush */
1711 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1713 #if FF_API_LAVF_FMT_RAWPICTURE
1714 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the flushing entry point matching the codec type */
1719 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1722 switch (enc->codec_type) {
1723 case AVMEDIA_TYPE_AUDIO:
1724 encode = avcodec_encode_audio2;
1727 case AVMEDIA_TYPE_VIDEO:
1728 encode = avcodec_encode_video2;
1739 av_init_packet(&pkt);
1743 update_benchmark(NULL);
/* NULL frame == flush request; loop elsewhere repeats until no packet */
1744 ret = encode(enc, &pkt, NULL, &got_packet);
1745 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1747 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append encoder pass-1 stats to the log file */
1752 if (ost->logfile && enc->stats_out) {
1753 fprintf(ost->logfile, "%s", enc->stats_out);
1759 if (ost->finished & MUXER_FINISHED) {
1760 av_packet_unref(&pkt);
1763 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1764 pkt_size = pkt.size;
1765 write_frame(os, &pkt, ost);
1766 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1767 do_video_stats(ost, pkt_size);
1778 * Check whether a packet from ist should be written into ost at this time
/*
 * Check whether a packet from `ist` should be written into `ost` at this
 * time: the stream indices must match and the input pts must have reached
 * the output file's start time.  Returns non-zero when the packet should
 * be written (return statements are on lines not visible here).
 */
1780 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1782 OutputFile *of = output_files[ost->file_index];
1783 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1785 if (ost->source_index != ist_index)
1791 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to the output without re-encoding (-c copy):
 * rescale timestamps into the output stream timebase, honor -ss/-t and
 * -copypriorss/-copyinkf, optionally run the parser-level bitstream fixup,
 * then hand the packet to write_frame().
 *
 * @param ist source input stream
 * @param ost destination output stream (stream_copy)
 * @param pkt packet as read from the demuxer; not modified
 */
1797 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1799 OutputFile *of = output_files[ost->file_index];
1800 InputFile *f = input_files [ist->file_index];
1801 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1802 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1806 av_init_packet(&opkt);
/* drop leading non-keyframes unless the user asked to keep them */
1808 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1809 !ost->copy_initial_nonkeyframes)
/* drop packets before the requested output start unless -copypriorss */
1812 if (!ost->frame_number && !ost->copy_prior_start) {
1813 int64_t comp_start = start_time;
1814 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1815 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1816 if (pkt->pts == AV_NOPTS_VALUE ?
1817 ist->pts < comp_start :
1818 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file -t reached: finish this stream */
1822 if (of->recording_time != INT64_MAX &&
1823 ist->pts >= of->recording_time + start_time) {
1824 close_output_stream(ost);
/* input-file -t reached: same, but measured from the input start time */
1828 if (f->recording_time != INT64_MAX) {
1829 start_time = f->ctx->start_time;
1830 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1831 start_time += f->start_time;
1832 if (ist->pts >= f->recording_time + start_time) {
1833 close_output_stream(ost);
1838 /* force the input stream PTS */
1839 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1842 if (pkt->pts != AV_NOPTS_VALUE)
1843 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1845 opkt.pts = AV_NOPTS_VALUE;
/* missing dts: fall back to the tracked input stream dts */
1847 if (pkt->dts == AV_NOPTS_VALUE)
1848 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1850 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1851 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration to
 * avoid rounding drift across timebase conversions */
1853 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1854 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1856 duration = ist->dec_ctx->frame_size;
1857 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1858 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1859 ost->st->time_base) - ost_tb_start_time;
1862 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1863 opkt.flags = pkt->flags;
1864 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1865 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1866 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1867 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1868 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1870 int ret = av_parser_change(ost->parser, ost->st->codec,
1871 &opkt.data, &opkt.size,
1872 pkt->data, pkt->size,
1873 pkt->flags & AV_PKT_FLAG_KEY);
1875 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data in a refcounted buffer so unref frees it */
1880 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1885 opkt.data = pkt->data;
1886 opkt.size = pkt->size;
1888 av_copy_packet_side_data(&opkt, pkt);
1890 #if FF_API_LAVF_FMT_RAWPICTURE
1891 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1892 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1893 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1894 /* store AVPicture in AVPacket, as expected by the output format */
1895 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1897 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1901 opkt.data = (uint8_t *)&pict;
1902 opkt.size = sizeof(AVPicture);
1903 opkt.flags |= AV_PKT_FLAG_KEY;
1907 write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * Return statements (success/failure) are on lines not visible here;
 * callers treat 0 as "could not guess".
 */
1910 int guess_input_channel_layout(InputStream *ist)
1912 AVCodecContext *dec = ist->dec_ctx;
1914 if (!dec->channel_layout) {
1915 char layout_name[256];
/* refuse to guess above the user-set channel-count limit */
1917 if (dec->channels > ist->guess_layout_max)
1919 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1920 if (!dec->channel_layout)
1922 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1923 dec->channels, dec->channel_layout);
1924 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1925 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after a decode call: count successes/failures in
 * decode_error_stat[], and with -xerror abort on decode errors or on
 * frames the decoder flagged as corrupt.
 *
 * @param ist        stream being decoded; may be NULL (e.g. subtitles)
 * @param got_output set by the decoder when a frame was produced
 * @param ret        return value of the decode call
 */
1930 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1932 if (*got_output || ret<0)
1933 decode_error_stat[ret<0] ++;
1935 if (ret < 0 && exit_on_error)
1938 if (exit_on_error && *got_output && ist) {
1939 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1940 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Decode one audio packet, track timestamps, reconfigure filtergraphs on
 * sample-format/rate/channel changes, and push the decoded frame into all
 * filters attached to this input stream.
 *
 * @param ist        input stream owning the decoder
 * @param pkt        packet to decode (pts consumed and cleared below)
 * @param got_output set when the decoder produced a frame
 * @return <0 on error, otherwise the decoder's return value
 */
1946 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1948 AVFrame *decoded_frame, *f;
1949 AVCodecContext *avctx = ist->dec_ctx;
1950 int i, ret, err = 0, resample_changed;
1951 AVRational decoded_frame_tb;
/* lazily allocate the two reusable frames owned by the stream */
1953 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1954 return AVERROR(ENOMEM);
1955 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1956 return AVERROR(ENOMEM);
1957 decoded_frame = ist->decoded_frame;
1959 update_benchmark(NULL);
1960 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1961 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1963 if (ret >= 0 && avctx->sample_rate <= 0) {
1964 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1965 ret = AVERROR_INVALIDDATA;
1968 check_decode_result(ist, got_output, ret);
1970 if (!*got_output || ret < 0)
1973 ist->samples_decoded += decoded_frame->nb_samples;
1974 ist->frames_decoded++;
1977 /* increment next_dts to use for the case where the input stream does not
1978 have timestamps or there are multiple frames in the packet */
1979 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1981 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in the audio parameters the filtergraph was built for */
1985 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1986 ist->resample_channels != avctx->channels ||
1987 ist->resample_channel_layout != decoded_frame->channel_layout ||
1988 ist->resample_sample_rate != decoded_frame->sample_rate;
1989 if (resample_changed) {
1990 char layout1[64], layout2[64];
1992 if (!guess_input_channel_layout(ist)) {
1993 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1994 "layout for Input Stream #%d.%d\n", ist->file_index,
1998 decoded_frame->channel_layout = avctx->channel_layout;
2000 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2001 ist->resample_channel_layout);
2002 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2003 decoded_frame->channel_layout);
2005 av_log(NULL, AV_LOG_INFO,
2006 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2007 ist->file_index, ist->st->index,
2008 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2009 ist->resample_channels, layout1,
2010 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2011 avctx->channels, layout2);
/* remember the new parameters, then rebuild the affected filtergraphs */
2013 ist->resample_sample_fmt = decoded_frame->format;
2014 ist->resample_sample_rate = decoded_frame->sample_rate;
2015 ist->resample_channel_layout = decoded_frame->channel_layout;
2016 ist->resample_channels = avctx->channels;
2018 for (i = 0; i < nb_filtergraphs; i++)
2019 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2020 FilterGraph *fg = filtergraphs[i];
2021 if (configure_filtergraph(fg) < 0) {
2022 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2028 /* if the decoder provides a pts, use it instead of the last packet pts.
2029 the decoder could be delaying output by a packet or more. */
2030 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2031 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2032 decoded_frame_tb = avctx->time_base;
2033 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2034 decoded_frame->pts = decoded_frame->pkt_pts;
2035 decoded_frame_tb = ist->st->time_base;
2036 } else if (pkt->pts != AV_NOPTS_VALUE) {
2037 decoded_frame->pts = pkt->pts;
2038 decoded_frame_tb = ist->st->time_base;
/* last resort: use the running stream dts in AV_TIME_BASE units */
2040 decoded_frame->pts = ist->dts;
2041 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so it is not reused for a later frame */
2043 pkt->pts = AV_NOPTS_VALUE;
2044 if (decoded_frame->pts != AV_NOPTS_VALUE)
2045 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2046 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2047 (AVRational){1, avctx->sample_rate});
2048 ist->nb_samples = decoded_frame->nb_samples;
2049 for (i = 0; i < ist->nb_filters; i++) {
/* every filter but the last gets a new reference; the last consumes
 * decoded_frame itself */
2050 if (i < ist->nb_filters - 1) {
2051 f = ist->filter_frame;
2052 err = av_frame_ref(f, decoded_frame);
2057 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2058 AV_BUFFERSRC_FLAG_PUSH);
2059 if (err == AVERROR_EOF)
2060 err = 0; /* ignore */
2064 decoded_frame->pts = AV_NOPTS_VALUE;
2066 av_frame_unref(ist->filter_frame);
2067 av_frame_unref(decoded_frame);
2068 return err < 0 ? err : ret;
/*
 * Decode one video packet, maintain best-effort timestamps, handle
 * hwaccel frame download and mid-stream size/pixel-format changes (with
 * filtergraph reconfiguration), then push the frame into all attached
 * filters.
 *
 * @param ist        input stream owning the decoder
 * @param pkt        packet to decode; its dts is overwritten below
 * @param got_output set when the decoder produced a frame
 * @return <0 on error, otherwise the decoder's return value
 */
2071 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2073 AVFrame *decoded_frame, *f;
2074 int i, ret = 0, err = 0, resample_changed;
2075 int64_t best_effort_timestamp;
2076 AVRational *frame_sample_aspect;
2078 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2079 return AVERROR(ENOMEM);
2080 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2081 return AVERROR(ENOMEM);
2082 decoded_frame = ist->decoded_frame;
/* feed the decoder our tracked dts rescaled to the stream timebase */
2083 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2085 update_benchmark(NULL);
2086 ret = avcodec_decode_video2(ist->dec_ctx,
2087 decoded_frame, got_output, pkt);
2088 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2090 // The following line may be required in some cases where there is no parser
2091 // or the parser does not has_b_frames correctly
2092 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2093 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2094 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2096 av_log(ist->dec_ctx, AV_LOG_WARNING,
2097 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2098 "If you want to help, upload a sample "
2099 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2100 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2101 ist->dec_ctx->has_b_frames,
2102 ist->st->codec->has_b_frames);
2105 check_decode_result(ist, got_output, ret);
/* debug aid: report when context and frame disagree on geometry/format */
2107 if (*got_output && ret >= 0) {
2108 if (ist->dec_ctx->width != decoded_frame->width ||
2109 ist->dec_ctx->height != decoded_frame->height ||
2110 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2111 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2112 decoded_frame->width,
2113 decoded_frame->height,
2114 decoded_frame->format,
2115 ist->dec_ctx->width,
2116 ist->dec_ctx->height,
2117 ist->dec_ctx->pix_fmt);
2121 if (!*got_output || ret < 0)
/* honor the user's -top override for interlaced material */
2124 if(ist->top_field_first>=0)
2125 decoded_frame->top_field_first = ist->top_field_first;
2127 ist->frames_decoded++;
/* hwaccel: download the frame from device memory before filtering */
2129 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2130 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2134 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2136 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2137 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2138 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2140 if (ts != AV_NOPTS_VALUE)
2141 ist->next_pts = ist->pts = ts;
2145 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2146 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2147 ist->st->index, av_ts2str(decoded_frame->pts),
2148 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2149 best_effort_timestamp,
2150 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2151 decoded_frame->key_frame, decoded_frame->pict_type,
2152 ist->st->time_base.num, ist->st->time_base.den);
2157 if (ist->st->sample_aspect_ratio.num)
2158 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream geometry/pixel-format changes that require the
 * filtergraphs to be rebuilt */
2160 resample_changed = ist->resample_width != decoded_frame->width ||
2161 ist->resample_height != decoded_frame->height ||
2162 ist->resample_pix_fmt != decoded_frame->format;
2163 if (resample_changed) {
2164 av_log(NULL, AV_LOG_INFO,
2165 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2166 ist->file_index, ist->st->index,
2167 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2168 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2170 ist->resample_width = decoded_frame->width;
2171 ist->resample_height = decoded_frame->height;
2172 ist->resample_pix_fmt = decoded_frame->format;
2174 for (i = 0; i < nb_filtergraphs; i++) {
2175 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2176 configure_filtergraph(filtergraphs[i]) < 0) {
2177 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2183 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2184 for (i = 0; i < ist->nb_filters; i++) {
/* fill in a missing SAR from the container before feeding filters */
2185 if (!frame_sample_aspect->num)
2186 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* all but the last filter receive a new reference to the frame */
2188 if (i < ist->nb_filters - 1) {
2189 f = ist->filter_frame;
2190 err = av_frame_ref(f, decoded_frame);
2195 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2196 if (ret == AVERROR_EOF) {
2197 ret = 0; /* ignore */
2198 } else if (ret < 0) {
2199 av_log(NULL, AV_LOG_FATAL,
2200 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2206 av_frame_unref(ist->filter_frame);
2207 av_frame_unref(decoded_frame);
2208 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix overlapping display
 * durations (-fix_sub_duration) by clipping the previous subtitle, update
 * the sub2video state, and encode the subtitle into every matching
 * output stream.
 *
 * @return <0 on decode error, otherwise >=0
 */
2211 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2213 AVSubtitle subtitle;
2214 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2215 &subtitle, got_output, pkt);
2217 check_decode_result(NULL, got_output, ret);
2219 if (ret < 0 || !*got_output) {
2221 sub2video_flush(ist);
2225 if (ist->fix_sub_duration) {
2227 if (ist->prev_sub.got_output) {
/* end time of the previous subtitle, in ms relative to its pts */
2228 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2229 1000, AV_TIME_BASE);
2230 if (end < ist->prev_sub.subtitle.end_display_time) {
2231 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2232 "Subtitle duration reduced from %d to %d%s\n",
2233 ist->prev_sub.subtitle.end_display_time, end,
2234 end <= 0 ? ", dropping it" : "");
2235 ist->prev_sub.subtitle.end_display_time = end;
/* delay output by one subtitle: emit the previous one, keep the new */
2238 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2239 FFSWAP(int, ret, ist->prev_sub.ret);
2240 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2248 sub2video_update(ist, &subtitle);
2250 if (!subtitle.num_rects)
2253 ist->frames_decoded++;
2255 for (i = 0; i < nb_output_streams; i++) {
2256 OutputStream *ost = output_streams[i];
2258 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2259 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2262 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2266 avsubtitle_free(&subtitle);
/*
 * Send EOF (a NULL frame) to every buffersrc filter attached to `ist`,
 * so the filtergraphs can flush.  Returns 0 on success, <0 on the first
 * failure (return statements are on lines not visible here).
 */
2270 static int send_filter_eof(InputStream *ist)
2273 for (i = 0; i < ist->nb_filters; i++) {
2274 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2281 /* pkt = NULL means EOF (needed to flush decoder buffers) */
// Process one demuxed packet for input stream 'ist': decode it (audio /
// video / subtitle) or, for stream copy, just advance the timestamp
// bookkeeping and forward the packet to matching outputs. With pkt == NULL
// the decoders are flushed; 'no_eof' suppresses sending EOF to the filters
// (used when the input is being looped). NOTE(review): this fragment is
// non-contiguous — braces, declarations and some statements are missing.
2282 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
// First timestamp seen for this stream: prime ist->dts with a negative
// offset accounting for the decoder's B-frame delay (when a frame rate
// is known), optionally overridden by the packet's own pts below.
2288 if (!ist->saw_first_ts) {
2289 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2292 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2293 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2295 ist->saw_first_ts = 1;
2298 if (ist->next_dts == AV_NOPTS_VALUE)
2299 ist->next_dts = ist->dts;
2300 if (ist->next_pts == AV_NOPTS_VALUE)
2301 ist->next_pts = ist->pts;
2305 av_init_packet(&avpkt);
// Sync the stream clocks to the packet's dts (rescaled to AV_TIME_BASE).
2313 if (pkt->dts != AV_NOPTS_VALUE) {
2314 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2315 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2316 ist->next_pts = ist->pts = ist->dts;
2319 // while we have more to decode or while the decoder did output something on EOF
2320 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2324 ist->pts = ist->next_pts;
2325 ist->dts = ist->next_dts;
// Warn (once per stream at WARNING level, then VERBOSE) when a decoder
// consumes only part of a packet and the codec does not declare
// AV_CODEC_CAP_SUBFRAMES.
2327 if (avpkt.size && avpkt.size != pkt->size &&
2328 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2329 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2330 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2331 ist->showed_multi_packet_warning = 1;
// Dispatch to the media-type-specific decode helper.
2334 switch (ist->dec_ctx->codec_type) {
2335 case AVMEDIA_TYPE_AUDIO:
2336 ret = decode_audio (ist, &avpkt, &got_output);
2338 case AVMEDIA_TYPE_VIDEO:
2339 ret = decode_video (ist, &avpkt, &got_output);
// Estimate the frame duration: prefer the packet's own duration,
// else derive it from the codec frame rate and repeat_pict ticks.
2340 if (avpkt.duration) {
2341 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2342 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2343 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2344 duration = ((int64_t)AV_TIME_BASE *
2345 ist->dec_ctx->framerate.den * ticks) /
2346 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2350 if(ist->dts != AV_NOPTS_VALUE && duration) {
2351 ist->next_dts += duration;
2353 ist->next_dts = AV_NOPTS_VALUE;
2356 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2358 case AVMEDIA_TYPE_SUBTITLE:
2359 ret = transcode_subtitles(ist, &avpkt, &got_output);
2366 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2367 ist->file_index, ist->st->index, av_err2str(ret));
// pts is only meaningful on the very first decode call of a packet.
2374 avpkt.pts= AV_NOPTS_VALUE;
2376 // touch data and size only if not EOF
2378 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2386 if (got_output && !pkt)
2390 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2391 /* except when looping we need to flush but not to send an EOF */
2392 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2393 int ret = send_filter_eof(ist);
2395 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2400 /* handle stream copy */
// No decoding: advance next_dts manually from frame_size (audio) or
// frame rate / packet duration (video) so copied packets stay in sync.
2401 if (!ist->decoding_needed) {
2402 ist->dts = ist->next_dts;
2403 switch (ist->dec_ctx->codec_type) {
2404 case AVMEDIA_TYPE_AUDIO:
2405 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2406 ist->dec_ctx->sample_rate;
2408 case AVMEDIA_TYPE_VIDEO:
2409 if (ist->framerate.num) {
2410 // TODO: Remove work-around for c99-to-c89 issue 7
2411 AVRational time_base_q = AV_TIME_BASE_Q;
2412 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2413 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2414 } else if (pkt->duration) {
2415 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2416 } else if(ist->dec_ctx->framerate.num != 0) {
2417 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2418 ist->next_dts += ((int64_t)AV_TIME_BASE *
2419 ist->dec_ctx->framerate.den * ticks) /
2420 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2424 ist->pts = ist->dts;
2425 ist->next_pts = ist->next_dts;
// Forward the packet to every output stream that stream-copies from it.
2427 for (i = 0; pkt && i < nb_output_streams; i++) {
2428 OutputStream *ost = output_streams[i];
2430 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2433 do_streamcopy(ist, ost, pkt);
// Build an SDP description covering all RTP output muxers and either print
// it to stdout or write it to the file given by -sdp_file.
// NOTE(review): fragment is non-contiguous — declarations of i/j/sdp and
// some error paths are not visible here.
2439 static void print_sdp(void)
2444 AVIOContext *sdp_pb;
2445 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the outputs whose muxer is "rtp"; j counts them.
2449 for (i = 0, j = 0; i < nb_output_files; i++) {
2450 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2451 avc[j] = output_files[i]->ctx;
2459 av_sdp_create(avc, j, sdp, sizeof(sdp));
2461 if (!sdp_filename) {
2462 printf("SDP:\n%s\n", sdp);
2465 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2466 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2468 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2469 avio_closep(&sdp_pb);
2470 av_freep(&sdp_filename);
// Look up the HWAccel table entry whose pixel format matches pix_fmt.
// NOTE(review): the "not found" return at the end of the function is not
// visible in this fragment.
2478 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2481 for (i = 0; hwaccels[i].name; i++)
2482 if (hwaccels[i].pix_fmt == pix_fmt)
2483 return &hwaccels[i];
// AVCodecContext.get_format callback: walk the decoder's candidate pixel
// formats and pick the first hwaccel format that matches the user's
// -hwaccel selection and initializes successfully; records the active
// hwaccel on the InputStream. NOTE(review): fragment is non-contiguous —
// the software-format fallback path is not visible here.
2487 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2489 InputStream *ist = s->opaque;
2490 const enum AVPixelFormat *p;
2493 for (p = pix_fmts; *p != -1; p++) {
2494 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2495 const HWAccel *hwaccel;
// Skip plain software formats; only hwaccel formats are considered here.
2497 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2500 hwaccel = get_hwaccel(*p);
// Reject formats that belong to a different hwaccel than the one already
// active or the one explicitly requested on the command line.
2502 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2503 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2506 ret = hwaccel->init(s);
// Init failure is fatal only when this hwaccel was explicitly requested.
2508 if (ist->hwaccel_id == hwaccel->id) {
2509 av_log(NULL, AV_LOG_FATAL,
2510 "%s hwaccel requested for input stream #%d:%d, "
2511 "but cannot be initialized.\n", hwaccel->name,
2512 ist->file_index, ist->st->index);
2513 return AV_PIX_FMT_NONE;
2517 ist->active_hwaccel_id = hwaccel->id;
2518 ist->hwaccel_pix_fmt = *p;
// AVCodecContext.get_buffer2 callback: route frame allocation to the active
// hwaccel's buffer allocator when the frame uses the hwaccel pixel format,
// otherwise fall back to the default allocator.
2525 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2527 InputStream *ist = s->opaque;
2529 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2530 return ist->hwaccel_get_buffer(s, frame, flags);
2532 return avcodec_default_get_buffer2(s, frame, flags);
// Open the decoder for input stream ist_index (if decoding is needed),
// installing the hwaccel callbacks and per-codec decoder options. On
// failure, writes a message into 'error' and returns a negative AVERROR.
// NOTE(review): fragment is non-contiguous — the codec NULL check guarding
// the snprintf at 2543 and the final return are not visible here.
2535 static int init_input_stream(int ist_index, char *error, int error_len)
2538 InputStream *ist = input_streams[ist_index];
2540 if (ist->decoding_needed) {
2541 AVCodec *codec = ist->dec;
2543 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2544 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2545 return AVERROR(EINVAL);
// Hook up the hwaccel-aware get_format/get_buffer2 callbacks; opaque
// carries the InputStream back into the callbacks.
2548 ist->dec_ctx->opaque = ist;
2549 ist->dec_ctx->get_format = get_format;
2550 ist->dec_ctx->get_buffer2 = get_buffer;
2551 ist->dec_ctx->thread_safe_callbacks = 1;
2553 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
// DVB subtitles used for output: enable end-display-time computation;
// warn that combining filtering and output is only partially supported.
2554 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2555 (ist->decoding_needed & DECODING_FOR_OST)) {
2556 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2557 if (ist->decoding_needed & DECODING_FOR_FILTER)
2558 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2561 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
// Default to automatic thread count unless the user set one.
2563 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2564 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2565 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2566 if (ret == AVERROR_EXPERIMENTAL)
2567 abort_codec_experimental(codec, 0);
2569 snprintf(error, error_len,
2570 "Error while opening decoder for input stream "
2572 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder options left in the dict were not consumed — report them.
2575 assert_avoptions(ist->decoder_opts);
2578 ist->next_pts = AV_NOPTS_VALUE;
2579 ist->next_dts = AV_NOPTS_VALUE;
// Return the input stream feeding this output stream, or (per the visible
// guard) fall through when the output has no direct source (e.g. it is fed
// by a complex filtergraph). NOTE(review): the NULL-returning tail of the
// function is not visible in this fragment.
2584 static InputStream *get_input_stream(OutputStream *ost)
2586 if (ost->source_index >= 0)
2587 return input_streams[ost->source_index];
// qsort comparator for int64_t values (ascending), used to sort the
// forced-keyframe pts array.
2591 static int compare_int64(const void *a, const void *b)
2593 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
// Open the encoder for an output stream (copying the subtitle header from
// the decoder, wiring hw frame contexts, and mirroring coded side data to
// the muxer stream), or, for stream copy, apply the user's codec options.
// On failure writes into 'error' and returns a negative AVERROR.
// NOTE(review): fragment is non-contiguous — declarations, some braces and
// the final return are not visible here.
2596 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2600 if (ost->encoding_needed) {
2601 AVCodec *codec = ost->enc;
2602 AVCodecContext *dec = NULL;
2605 if ((ist = get_input_stream(ost)))
// Propagate the decoder's ASS subtitle header to the encoder.
2607 if (dec && dec->subtitle_header) {
2608 /* ASS code assumes this buffer is null terminated so add extra byte. */
2609 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2610 if (!ost->enc_ctx->subtitle_header)
2611 return AVERROR(ENOMEM);
2612 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2613 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2615 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2616 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
// Default audio bitrate to 128k when the user gave neither -b nor -ab.
2617 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2619 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2620 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2621 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
// Hand the filtergraph's hardware frames context to the encoder.
2623 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2624 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2625 if (!ost->enc_ctx->hw_frames_ctx)
2626 return AVERROR(ENOMEM);
2629 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2630 if (ret == AVERROR_EXPERIMENTAL)
2631 abort_codec_experimental(codec, 1);
2632 snprintf(error, error_len,
2633 "Error while opening encoder for output stream #%d:%d - "
2634 "maybe incorrect parameters such as bit_rate, rate, width or height",
2635 ost->file_index, ost->index);
// Fixed-frame-size audio encoders need the buffersink to emit exactly
// frame_size samples per frame.
2638 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2639 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2640 av_buffersink_set_frame_size(ost->filter->filter,
2641 ost->enc_ctx->frame_size);
2642 assert_avoptions(ost->encoder_opts);
2643 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2644 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2645 " It takes bits/s as argument, not kbits/s\n");
2647 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2649 av_log(NULL, AV_LOG_FATAL,
2650 "Error initializing the output stream codec context.\n");
// Copy the encoder's coded side data into the muxer-visible AVStream.
2654 if (ost->enc_ctx->nb_coded_side_data) {
2657 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2658 sizeof(*ost->st->side_data));
2659 if (!ost->st->side_data)
2660 return AVERROR(ENOMEM);
2662 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2663 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2664 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2666 sd_dst->data = av_malloc(sd_src->size);
2668 return AVERROR(ENOMEM);
2669 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2670 sd_dst->size = sd_src->size;
2671 sd_dst->type = sd_src->type;
2672 ost->st->nb_side_data++;
2676 // copy timebase while removing common factors
2677 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2678 ost->st->codec->codec= ost->enc_ctx->codec;
2679 } else if (ost->stream_copy) {
2680 ret = av_opt_set_dict(ost->st->codec, &ost->encoder_opts);
2682 av_log(NULL, AV_LOG_FATAL,
2683 "Error setting up codec context options.\n");
2686 // copy timebase while removing common factors
2687 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
// Parse the -force_key_frames argument 'kf' (a comma-separated list of
// timestamps, where an entry starting with "chapters" expands to all
// chapter start times plus an optional offset) into a sorted array of pts
// values in avctx->time_base, stored on ost->forced_kf_pts/count.
// NOTE(review): fragment is non-contiguous — the comma-counting loop body,
// size computation and some control flow are not visible here.
2693 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2694 AVCodecContext *avctx)
2697 int n = 1, i, size, index = 0;
// Count entries by counting commas (n starts at 1).
2700 for (p = kf; *p; p++)
2704 pts = av_malloc_array(size, sizeof(*pts));
2706 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2711 for (i = 0; i < n; i++) {
2712 char *next = strchr(p, ',');
// "chapters[+offset]": add one keyframe per chapter start time.
2717 if (!memcmp(p, "chapters", 8)) {
2719 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Grow the array by (nb_chapters - 1) slots, guarding int overflow.
2722 if (avf->nb_chapters > INT_MAX - size ||
2723 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2725 av_log(NULL, AV_LOG_FATAL,
2726 "Could not allocate forced key frames array.\n");
2729 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2730 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2732 for (j = 0; j < avf->nb_chapters; j++) {
2733 AVChapter *c = avf->chapters[j];
2734 av_assert1(index < size);
2735 pts[index++] = av_rescale_q(c->start, c->time_base,
2736 avctx->time_base) + t;
// Plain timestamp entry.
2741 t = parse_time_or_die("force_key_frames", p, 1);
2742 av_assert1(index < size);
2743 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2750 av_assert0(index == size);
// Keep the list sorted so do_video_out can scan it linearly.
2751 qsort(pts, size, sizeof(*pts), compare_int64);
2752 ost->forced_kf_count = size;
2753 ost->forced_kf_pts = pts;
// Log a warning the first time a packet appears for a stream index not yet
// seen in this input file (a stream that appeared mid-file); remembers the
// highest warned index in file->nb_streams_warn to warn only once per stream.
2756 static void report_new_stream(int input_index, AVPacket *pkt)
2758 InputFile *file = input_files[input_index];
2759 AVStream *st = file->ctx->streams[pkt->stream_index];
// Already warned about this stream index — nothing to do.
2761 if (pkt->stream_index < file->nb_streams_warn)
2763 av_log(file->ctx, AV_LOG_WARNING,
2764 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2765 av_get_media_type_string(st->codec->codec_type),
2766 input_index, pkt->stream_index,
2767 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2768 file->nb_streams_warn = pkt->stream_index + 1;
// Set the stream's "encoder" metadata tag to "<LIBAVCODEC_IDENT> <enc name>"
// (or "Lavc <enc name>" in bitexact mode), unless the user already provided
// an encoder tag. Bitexact is detected from both the muxer's fflags and the
// encoder's flags options.
2771 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2773 AVDictionaryEntry *e;
2775 uint8_t *encoder_string;
2776 int encoder_string_len;
2777 int format_flags = 0;
2778 int codec_flags = 0;
// User-supplied encoder tag wins — do not override it.
2780 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2783 e = av_dict_get(of->opts, "fflags", NULL, 0);
2785 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2788 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2790 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2792 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2795 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
// +2: separator space and terminating NUL.
2798 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2799 encoder_string = av_mallocz(encoder_string_len);
2800 if (!encoder_string)
2803 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2804 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2806 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2807 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL hands ownership of encoder_string to the dict.
2808 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2809 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// One-time transcoding setup: bind complex-filtergraph outputs to source
// streams, compute encoding/stream-copy parameters for every output stream,
// open all decoders and encoders, write output file headers, and dump the
// stream mapping. Returns 0 on success or a negative AVERROR.
// NOTE(review): this fragment is heavily non-contiguous — braces, some
// declarations and several statements are missing between visible lines.
2812 static int transcode_init(void)
2814 int ret = 0, i, j, k;
2815 AVFormatContext *oc;
2818 char error[1024] = {0};
// Give filtergraph-fed outputs a nominal source stream: the last input
// stream matching the graph's single input (only for 1-input graphs).
2821 for (i = 0; i < nb_filtergraphs; i++) {
2822 FilterGraph *fg = filtergraphs[i];
2823 for (j = 0; j < fg->nb_outputs; j++) {
2824 OutputFilter *ofilter = fg->outputs[j];
2825 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2827 if (fg->nb_inputs != 1)
2829 for (k = nb_input_streams-1; k >= 0 ; k--)
2830 if (fg->inputs[0]->ist == input_streams[k])
2832 ofilter->ost->source_index = k;
2836 /* init framerate emulation */
2837 for (i = 0; i < nb_input_files; i++) {
2838 InputFile *ifile = input_files[i];
2839 if (ifile->rate_emu)
2840 for (j = 0; j < ifile->nb_streams; j++)
2841 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2844 /* for each output stream, we compute the right encoding parameters */
2845 for (i = 0; i < nb_output_streams; i++) {
2846 AVCodecContext *enc_ctx;
2847 AVCodecContext *dec_ctx = NULL;
2848 ost = output_streams[i];
2849 oc = output_files[ost->file_index]->ctx;
2850 ist = get_input_stream(ost);
2852 if (ost->attachment_filename)
// Stream copy configures the muxer-visible st->codec directly; encoding
// uses the separate enc_ctx.
2855 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2858 dec_ctx = ist->dec_ctx;
2860 ost->st->disposition = ist->st->disposition;
2861 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the output file, mark
// it as the default audio/video stream.
2863 for (j=0; j<oc->nb_streams; j++) {
2864 AVStream *st = oc->streams[j];
2865 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2868 if (j == oc->nb_streams)
2869 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2870 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2873 if (ost->stream_copy) {
2875 uint64_t extra_size;
2877 av_assert0(ist && !ost->filter);
2879 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2881 if (extra_size > INT_MAX) {
2882 return AVERROR(EINVAL);
2885 /* if stream_copy is selected, no need to decode or encode */
2886 enc_ctx->codec_id = dec_ctx->codec_id;
2887 enc_ctx->codec_type = dec_ctx->codec_type;
// Keep the input codec tag only when the output container accepts it
// (or has no tag table of its own).
2889 if (!enc_ctx->codec_tag) {
2890 unsigned int codec_tag;
2891 if (!oc->oformat->codec_tag ||
2892 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2893 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2894 enc_ctx->codec_tag = dec_ctx->codec_tag;
2897 enc_ctx->bit_rate = dec_ctx->bit_rate;
2898 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2899 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2900 enc_ctx->field_order = dec_ctx->field_order;
// Copy the decoder's extradata with the required zeroed padding.
2901 if (dec_ctx->extradata_size) {
2902 enc_ctx->extradata = av_mallocz(extra_size);
2903 if (!enc_ctx->extradata) {
2904 return AVERROR(ENOMEM);
2906 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2908 enc_ctx->extradata_size= dec_ctx->extradata_size;
2909 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2910 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2912 enc_ctx->time_base = ist->st->time_base;
2914 * Avi is a special case here because it supports variable fps but
2915 * having the fps and timebase differe significantly adds quite some
// AVI: prefer a timebase derived from r_frame_rate when the stream's own
// timebase is suspiciously fine-grained (< 1/500 s).
2918 if(!strcmp(oc->oformat->name, "avi")) {
2919 if ( copy_tb<0 && ist->st->r_frame_rate.num
2920 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2921 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2922 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2923 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2925 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2926 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2927 enc_ctx->ticks_per_frame = 2;
2928 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2929 && av_q2d(ist->st->time_base) < 1.0/500
2931 enc_ctx->time_base = dec_ctx->time_base;
2932 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2933 enc_ctx->time_base.den *= 2;
2934 enc_ctx->ticks_per_frame = 2;
// Non-VFR containers (excluding the mov/mp4 family): fall back to the
// codec timebase when the stream timebase is too fine.
2936 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2937 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2938 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2939 && strcmp(oc->oformat->name, "f4v")
2941 if( copy_tb<0 && dec_ctx->time_base.den
2942 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2943 && av_q2d(ist->st->time_base) < 1.0/500
2945 enc_ctx->time_base = dec_ctx->time_base;
2946 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
// Timecode (tmcd) tracks with plausible frame-rate timebases keep the
// decoder timebase verbatim.
2949 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2950 && dec_ctx->time_base.num < dec_ctx->time_base.den
2951 && dec_ctx->time_base.num > 0
2952 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2953 enc_ctx->time_base = dec_ctx->time_base;
2956 if (!ost->frame_rate.num)
2957 ost->frame_rate = ist->framerate;
2958 if(ost->frame_rate.num)
2959 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2961 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2962 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
// Mirror the input stream's side data onto the output stream, skipping a
// display matrix when rotation was overridden on the command line.
2964 if (ist->st->nb_side_data) {
2965 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2966 sizeof(*ist->st->side_data));
2967 if (!ost->st->side_data)
2968 return AVERROR(ENOMEM);
2970 ost->st->nb_side_data = 0;
2971 for (j = 0; j < ist->st->nb_side_data; j++) {
2972 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2973 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2975 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2978 sd_dst->data = av_malloc(sd_src->size);
2980 return AVERROR(ENOMEM);
2981 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2982 sd_dst->size = sd_src->size;
2983 sd_dst->type = sd_src->type;
2984 ost->st->nb_side_data++;
2988 ost->parser = av_parser_init(enc_ctx->codec_id);
// Per-media-type parameter copy for stream copy mode.
2990 switch (enc_ctx->codec_type) {
2991 case AVMEDIA_TYPE_AUDIO:
2992 if (audio_volume != 256) {
2993 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2996 enc_ctx->channel_layout = dec_ctx->channel_layout;
2997 enc_ctx->sample_rate = dec_ctx->sample_rate;
2998 enc_ctx->channels = dec_ctx->channels;
2999 enc_ctx->frame_size = dec_ctx->frame_size;
3000 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3001 enc_ctx->block_align = dec_ctx->block_align;
3002 enc_ctx->initial_padding = dec_ctx->delay;
3003 enc_ctx->profile = dec_ctx->profile;
3004 #if FF_API_AUDIOENC_DELAY
3005 enc_ctx->delay = dec_ctx->delay;
// MP3/AC3: clear bogus block_align values that would confuse muxers.
3007 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3008 enc_ctx->block_align= 0;
3009 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3010 enc_ctx->block_align= 0;
3012 case AVMEDIA_TYPE_VIDEO:
3013 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3014 enc_ctx->colorspace = dec_ctx->colorspace;
3015 enc_ctx->color_range = dec_ctx->color_range;
3016 enc_ctx->color_primaries = dec_ctx->color_primaries;
3017 enc_ctx->color_trc = dec_ctx->color_trc;
3018 enc_ctx->width = dec_ctx->width;
3019 enc_ctx->height = dec_ctx->height;
3020 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3021 enc_ctx->profile = dec_ctx->profile;
3022 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3024 av_mul_q(ost->frame_aspect_ratio,
3025 (AVRational){ enc_ctx->height, enc_ctx->width });
3026 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3027 "with stream copy may produce invalid files\n");
3029 else if (ist->st->sample_aspect_ratio.num)
3030 sar = ist->st->sample_aspect_ratio;
3032 sar = dec_ctx->sample_aspect_ratio;
3033 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3034 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3035 ost->st->r_frame_rate = ist->st->r_frame_rate;
3037 case AVMEDIA_TYPE_SUBTITLE:
3038 enc_ctx->width = dec_ctx->width;
3039 enc_ctx->height = dec_ctx->height;
3041 case AVMEDIA_TYPE_UNKNOWN:
3042 case AVMEDIA_TYPE_DATA:
3043 case AVMEDIA_TYPE_ATTACHMENT:
// Encoding path: tag the stream with the encoder identification,
// configure simple filtergraphs, then derive encoder parameters from
// the filter output.
3049 set_encoder_id(output_files[ost->file_index], ost);
3052 if (qsv_transcode_init(ost))
3057 if (cuvid_transcode_init(ost))
3061 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3062 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3063 filtergraph_is_simple(ost->filter->graph)) {
3064 FilterGraph *fg = ost->filter->graph;
3065 if (configure_filtergraph(fg)) {
3066 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
// Output frame rate: buffersink > -r on input > input r_frame_rate >
// hard-coded 25 fps fallback (with a warning).
3071 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3072 if (!ost->frame_rate.num)
3073 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3074 if (ist && !ost->frame_rate.num)
3075 ost->frame_rate = ist->framerate;
3076 if (ist && !ost->frame_rate.num)
3077 ost->frame_rate = ist->st->r_frame_rate;
3078 if (ist && !ost->frame_rate.num) {
3079 ost->frame_rate = (AVRational){25, 1};
3080 av_log(NULL, AV_LOG_WARNING,
3082 "about the input framerate is available. Falling "
3083 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3084 "if you want a different framerate.\n",
3085 ost->file_index, ost->index);
3087 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3088 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3089 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3090 ost->frame_rate = ost->enc->supported_framerates[idx];
3092 // reduce frame rate for mpeg4 to be within the spec limits
3093 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3094 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3095 ost->frame_rate.num, ost->frame_rate.den, 65535);
// Take format/rate/layout/dimensions from the filtergraph output link.
3099 switch (enc_ctx->codec_type) {
3100 case AVMEDIA_TYPE_AUDIO:
3101 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3103 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3104 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3105 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3106 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3107 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3108 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3110 case AVMEDIA_TYPE_VIDEO:
3111 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3112 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3113 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3114 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3115 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3116 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3117 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Rescale previously parsed forced-keyframe pts to the final timebase.
3119 for (j = 0; j < ost->forced_kf_count; j++)
3120 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3122 enc_ctx->time_base);
3124 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3125 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3126 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3127 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3128 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3129 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3130 if (!strncmp(ost->enc->name, "libx264", 7) &&
3131 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3132 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3133 av_log(NULL, AV_LOG_WARNING,
3134 "No pixel format specified, %s for H.264 encoding chosen.\n"
3135 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3136 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3137 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3138 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3139 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3140 av_log(NULL, AV_LOG_WARNING,
3141 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3142 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3143 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3144 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3146 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3147 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3149 ost->st->avg_frame_rate = ost->frame_rate;
3152 enc_ctx->width != dec_ctx->width ||
3153 enc_ctx->height != dec_ctx->height ||
3154 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3155 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// Forced keyframes: "expr:" parses an expression evaluated per frame,
// "source" keeps input keyframes, anything else is a static pts list.
3158 if (ost->forced_keyframes) {
3159 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3160 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3161 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3163 av_log(NULL, AV_LOG_ERROR,
3164 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3167 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3168 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3169 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3170 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3172 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3173 // parse it only for static kf timings
3174 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3175 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3179 case AVMEDIA_TYPE_SUBTITLE:
3180 enc_ctx->time_base = (AVRational){1, 1000};
3181 if (!enc_ctx->width) {
3182 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3183 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3186 case AVMEDIA_TYPE_DATA:
// Evaluate the -disposition flags string via a local AVOption table.
3194 if (ost->disposition) {
3195 static const AVOption opts[] = {
3196 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3197 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3198 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3199 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3200 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3201 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3202 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3203 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3204 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3205 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3206 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3207 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3208 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3209 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3212 static const AVClass class = {
3214 .item_name = av_default_item_name,
3216 .version = LIBAVUTIL_VERSION_INT,
3218 const AVClass *pclass = &class;
3220 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3226 /* init input streams */
// On any decoder failure, close all encoder contexts before bailing out.
3227 for (i = 0; i < nb_input_streams; i++)
3228 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3229 for (i = 0; i < nb_output_streams; i++) {
3230 ost = output_streams[i];
3231 avcodec_close(ost->enc_ctx);
3236 /* open each encoder */
3237 for (i = 0; i < nb_output_streams; i++) {
3238 ret = init_output_stream(output_streams[i], error, sizeof(error));
3243 /* discard unused programs */
3244 for (i = 0; i < nb_input_files; i++) {
3245 InputFile *ifile = input_files[i];
3246 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3247 AVProgram *p = ifile->ctx->programs[j];
3248 int discard = AVDISCARD_ALL;
// Keep a program only if at least one of its streams is in use.
3250 for (k = 0; k < p->nb_stream_indexes; k++)
3251 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3252 discard = AVDISCARD_DEFAULT;
3255 p->discard = discard;
3259 /* open files and write file headers */
3260 for (i = 0; i < nb_output_files; i++) {
3261 oc = output_files[i]->ctx;
3262 oc->interrupt_callback = int_cb;
3263 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3264 snprintf(error, sizeof(error),
3265 "Could not write header for output file #%d "
3266 "(incorrect codec parameters ?): %s",
3267 i, av_err2str(ret));
3268 ret = AVERROR(EINVAL);
3271 // assert_avoptions(output_files[i]->opts);
3272 if (strcmp(oc->oformat->name, "rtp")) {
3278 /* dump the file output parameters - cannot be done before in case
3280 for (i = 0; i < nb_output_files; i++) {
3281 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3284 /* dump the stream mapping */
3285 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3286 for (i = 0; i < nb_input_streams; i++) {
3287 ist = input_streams[i];
3289 for (j = 0; j < ist->nb_filters; j++) {
3290 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3291 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3292 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3293 ist->filters[j]->name);
3294 if (nb_filtergraphs > 1)
3295 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3296 av_log(NULL, AV_LOG_INFO, "\n");
3301 for (i = 0; i < nb_output_streams; i++) {
3302 ost = output_streams[i];
3304 if (ost->attachment_filename) {
3305 /* an attached file */
3306 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3307 ost->attachment_filename, ost->file_index, ost->index);
3311 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3312 /* output from a complex graph */
3313 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3314 if (nb_filtergraphs > 1)
3315 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3317 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3318 ost->index, ost->enc ? ost->enc->name : "?");
3322 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3323 input_streams[ost->source_index]->file_index,
3324 input_streams[ost->source_index]->st->index,
3327 if (ost->sync_ist != input_streams[ost->source_index])
3328 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3329 ost->sync_ist->file_index,
3330 ost->sync_ist->st->index);
3331 if (ost->stream_copy)
3332 av_log(NULL, AV_LOG_INFO, " (copy)");
// Transcode case: print "codec (decoder) -> codec (encoder)", using
// "native" when the implementation name equals the codec name.
3334 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3335 const AVCodec *out_codec = ost->enc;
3336 const char *decoder_name = "?";
3337 const char *in_codec_name = "?";
3338 const char *encoder_name = "?";
3339 const char *out_codec_name = "?";
3340 const AVCodecDescriptor *desc;
3343 decoder_name = in_codec->name;
3344 desc = avcodec_descriptor_get(in_codec->id);
3346 in_codec_name = desc->name;
3347 if (!strcmp(decoder_name, in_codec_name))
3348 decoder_name = "native";
3352 encoder_name = out_codec->name;
3353 desc = avcodec_descriptor_get(out_codec->id);
3355 out_codec_name = desc->name;
3356 if (!strcmp(encoder_name, out_codec_name))
3357 encoder_name = "native";
3360 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3361 in_codec_name, decoder_name,
3362 out_codec_name, encoder_name);
3364 av_log(NULL, AV_LOG_INFO, "\n");
3368 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3372 if (sdp_filename || want_sdp) {
3376 transcode_init_done = 1;
3381 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3382 static int need_output(void)
/* NOTE(review): this listing is elided — the opening brace, the i/j loop
 * declarations and the return statements are not visible here. */
3386 for (i = 0; i < nb_output_streams; i++) {
3387 OutputStream *ost = output_streams[i];
3388 OutputFile *of = output_files[ost->file_index];
3389 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream no longer needs output once it is marked finished, or once its
 * output file has grown to the -fs byte limit (current size via avio_tell). */
3391 if (ost->finished ||
3392 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Per-stream -frames limit reached: close every stream of that output file. */
3394 if (ost->frame_number >= ost->max_frames) {
3396 for (j = 0; j < of->ctx->nb_streams; j++)
3397 close_output_stream(output_streams[of->ost_index + j]);
3408 * Select the output stream to process.
3410 * @return selected output stream, or NULL if none available
3412 static OutputStream *choose_output(void)
3415 int64_t opts_min = INT64_MAX;
3416 OutputStream *ost_min = NULL;
/* Scan all output streams and pick the unfinished one with the smallest
 * current DTS (rescaled from the stream time base) so output files stay
 * roughly interleaved.  A stream with no DTS yet sorts first (INT64_MIN). */
3418 for (i = 0; i < nb_output_streams; i++) {
3419 OutputStream *ost = output_streams[i];
3420 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3421 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3423 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3424 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3426 if (!ost->finished && opts < opts_min) {
/* If the best candidate is temporarily unavailable (its input hit EAGAIN),
 * report NULL so the caller retries later instead of spinning on it. */
3428 ost_min = ost->unavailable ? NULL : ost;
3434 static void set_tty_echo(int on)
/* Enable (on != 0) or disable terminal echo on stdin (fd 0); used while
 * reading interactive commands.  Failure of tcgetattr (e.g. stdin is not a
 * tty) is silently ignored. */
3438 if (tcgetattr(0, &tty) == 0) {
3439 if (on) tty.c_lflag |= ECHO;
3440 else tty.c_lflag &= ~ECHO;
3441 tcsetattr(0, TCSANOW, &tty);
/* Poll for single-key interactive commands on stdin (at most every 100 ms).
 * Returns AVERROR_EXIT when the user asked to quit or a signal was received;
 * otherwise handles verbosity, debug, dump and filter-command keys in place.
 * NOTE(review): listing is elided — the 'q' handling, the read_key() call
 * and several braces are not visible here. */
3446 static int check_keyboard_interaction(int64_t cur_time)
3449 static int64_t last_time;
3450 if (received_nb_signals)
3451 return AVERROR_EXIT;
3452 /* read_key() returns 0 on EOF */
3453 if(cur_time - last_time >= 100000 && !run_as_daemon){
3455 last_time = cur_time;
3459 return AVERROR_EXIT;
/* Verbosity and histogram toggles. */
3460 if (key == '+') av_log_set_level(av_log_get_level()+10);
3461 if (key == '-') av_log_set_level(av_log_get_level()-10);
3462 if (key == 's') qp_hist ^= 1;
3465 do_hex_dump = do_pkt_dump = 0;
3466 } else if(do_pkt_dump){
3470 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a full command line ("<target> <time> <command> [arg]") with
 * echo enabled and forward it to the filtergraphs — 'c' sends immediately to
 * the first matching filter, 'C' queues it for all matching filters. */
3472 if (key == 'c' || key == 'C'){
3473 char buf[4096], target[64], command[256], arg[256] = {0};
3476 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3479 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3484 fprintf(stderr, "\n");
3486 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3487 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3488 target, time, command, arg);
3489 for (i = 0; i < nb_filtergraphs; i++) {
3490 FilterGraph *fg = filtergraphs[i];
3493 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3494 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3495 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3496 } else if (key == 'c') {
3497 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3498 ret = AVERROR_PATCHWELCOME;
3500 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3502 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3507 av_log(NULL, AV_LOG_ERROR,
3508 "Parse error, at least 3 arguments were expected, "
3509 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle or prompt for a codec debug bitmask and apply it to every
 * input decoder and output encoder context. */
3512 if (key == 'd' || key == 'D'){
3515 debug = input_streams[0]->st->codec->debug<<1;
3516 if(!debug) debug = 1;
3517 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3524 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3529 fprintf(stderr, "\n");
3530 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3531 fprintf(stderr,"error parsing debug value\n");
3533 for(i=0;i<nb_input_streams;i++) {
3534 input_streams[i]->st->codec->debug = debug;
3536 for(i=0;i<nb_output_streams;i++) {
3537 OutputStream *ost = output_streams[i];
3538 ost->enc_ctx->debug = debug;
3540 if(debug) av_log_set_level(AV_LOG_DEBUG);
3541 fprintf(stderr,"debug=%d\n", debug);
/* '?': print the interactive key help. */
3544 fprintf(stderr, "key function\n"
3545 "? show this help\n"
3546 "+ increase verbosity\n"
3547 "- decrease verbosity\n"
3548 "c Send command to first matching filter supporting it\n"
3549 "C Send/Que command to all matching filters\n"
3550 "D cycle through available debug modes\n"
3551 "h dump packets/hex press to cycle through the 3 states\n"
3553 "s Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. */
3560 static void *input_thread(void *arg)
3563 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3568 ret = av_read_frame(f->ctx, &pkt);
3570 if (ret == AVERROR(EAGAIN)) {
/* Read error / EOF: propagate the error code to the receiving side. */
3575 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* First send attempt; for non-blocking inputs a full queue yields EAGAIN,
 * in which case the packet is re-sent (the elided line between 3579 and
 * 3581 presumably clears `flags` so the retry blocks — TODO confirm) and a
 * warning suggests raising -thread_queue_size. */
3578 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3579 if (flags && ret == AVERROR(EAGAIN)) {
3581 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3582 av_log(f->ctx, AV_LOG_WARNING,
3583 "Thread message queue blocking; consider raising the "
3584 "thread_queue_size option (current value: %d)\n",
3585 f->thread_queue_size);
/* Sending failed: drop the packet and tell the receiver about the error
 * (AVERROR_EOF is the normal shutdown path and is not logged). */
3588 if (ret != AVERROR_EOF)
3589 av_log(f->ctx, AV_LOG_ERROR,
3590 "Unable to send packet to main thread: %s\n",
3592 av_packet_unref(&pkt);
3593 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down all demuxer threads: signal EOF to the senders, drain and unref
 * any queued packets, join each thread, then free its message queue. */
3601 static void free_input_threads(void)
3605 for (i = 0; i < nb_input_files; i++) {
3606 InputFile *f = input_files[i];
/* Skip files that never had a thread/queue (e.g. single-input runs). */
3609 if (!f || !f->in_thread_queue)
3611 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3612 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3613 av_packet_unref(&pkt);
3615 pthread_join(f->thread, NULL);
3617 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread per input file.  With a single input file no
 * threads are needed (line 3625; the early return itself is elided from
 * this listing).  Returns 0 on success or a negative AVERROR code. */
3621 static int init_input_threads(void)
3625 if (nb_input_files == 1)
3628 for (i = 0; i < nb_input_files; i++) {
3629 InputFile *f = input_files[i];
/* Use non-blocking queue sends for non-seekable (live) inputs, except the
 * lavfi pseudo-demuxer, so a slow consumer cannot stall a live source. */
3631 if (f->ctx->pb ? !f->ctx->pb->seekable :
3632 strcmp(f->ctx->iformat->name, "lavfi"))
3633 f->non_blocking = 1;
3634 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3635 f->thread_queue_size, sizeof(AVPacket));
3639 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3640 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3641 av_thread_message_queue_free(&f->in_thread_queue);
3642 return AVERROR(ret);
/* Receive the next packet for this input file from its demuxer thread's
 * queue.  NOTE(review): the condition selecting AV_THREAD_MESSAGE_NONBLOCK
 * (presumably f->non_blocking) sits on an elided line — TODO confirm. */
3648 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3650 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3652 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file: rate-limit when requested, use
 * the per-file demuxer thread when several inputs exist, else read directly. */
3656 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* -re style rate emulation: compare each stream's DTS (rescaled to
 * microseconds) against elapsed wall-clock time; the actual comparison line
 * is elided here, but returning EAGAIN defers reading when ahead of real
 * time. */
3660 for (i = 0; i < f->nb_streams; i++) {
3661 InputStream *ist = input_streams[f->ist_index + i];
3662 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3663 int64_t now = av_gettime_relative() - ist->start;
3665 return AVERROR(EAGAIN);
3670 if (nb_input_files > 1)
3671 return get_input_packet_mt(f, pkt);
3673 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some step hit EAGAIN).  The return statements are elided from this
 * listing. */
3676 static int got_eagain(void)
3679 for (i = 0; i < nb_output_streams; i++)
3680 if (output_streams[i]->unavailable)
/* Clear all EAGAIN bookkeeping: per-input-file eagain flags and per-output
 * stream unavailable flags, so the next transcode iteration retries all. */
3685 static void reset_eagain(void)
3688 for (i = 0; i < nb_input_files; i++)
3689 input_files[i]->eagain = 0;
3690 for (i = 0; i < nb_output_streams; i++)
3691 output_streams[i]->unavailable = 0;
3694 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) with *duration (in time_base) using
 * av_compare_ts(); when tmp is larger, *duration is replaced (the
 * assignment line is elided here) and tmp's time base is returned. */
3695 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3696 AVRational time_base)
3702 return tmp_time_base;
3705 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3708 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to its start time, flush
 * all active decoders, then estimate the file's total duration (including
 * the last frame) so timestamps of the next iteration can be offset. */
3714 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3717 AVCodecContext *avctx;
3718 int i, ret, has_audio = 0;
3719 int64_t duration = 0;
3721 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: drain/flush decoders and detect whether usable audio exists. */
3725 for (i = 0; i < ifile->nb_streams; i++) {
3726 ist = input_streams[ifile->ist_index + i];
3727 avctx = ist->dec_ctx;
3730 if (ist->decoding_needed) {
3731 process_input_packet(ist, NULL, 1);
3732 avcodec_flush_buffers(avctx);
3735 /* duration is the length of the last frame in a stream
3736 * when audio stream is present we don't care about
3737 * last video frame length because it's not defined exactly */
3738 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: per stream, compute the last-frame duration (exact for audio
 * from nb_samples/sample_rate; for video from the forced or average frame
 * rate, defaulting to 1 tick) and fold the stream's total duration into
 * ifile->duration via duration_max(). */
3742 for (i = 0; i < ifile->nb_streams; i++) {
3743 ist = input_streams[ifile->ist_index + i];
3744 avctx = ist->dec_ctx;
3747 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3748 AVRational sample_rate = {1, avctx->sample_rate};
3750 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3754 if (ist->framerate.num) {
3755 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3756 } else if (ist->st->avg_frame_rate.num) {
3757 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3758 } else duration = 1;
3760 if (!ifile->duration)
3761 ifile->time_base = ist->st->time_base;
3762 /* the total duration of the stream, max_pts - min_pts is
3763 * the duration of the stream without the last frame */
3764 duration += ist->max_pts - ist->min_pts;
3765 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement it here (the decrement line is elided). */
3769 if (ifile->loop > 0)
/* Read and process one packet from the given input file.
 * Return codes (continuing the elided doc comment above): */
3777 * - 0 -- one packet was read and processed
3778 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3779 * this function should be called again
3780 * - AVERROR_EOF -- this function should not be called again
3782 static int process_input(int file_index)
3784 InputFile *ifile = input_files[file_index];
3785 AVFormatContext *is;
3793 ret = get_input_packet(ifile, &pkt);
3795 if (ret == AVERROR(EAGAIN)) {
/* On EOF with -stream_loop active, rewind and try reading again. */
3799 if (ret < 0 && ifile->loop) {
3800 if ((ret = seek_to_start(ifile, is)) < 0)
3802 ret = get_input_packet(ifile, &pkt);
3803 if (ret == AVERROR(EAGAIN)) {
3809 if (ret != AVERROR_EOF) {
3810 print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file with a NULL packet... */
3815 for (i = 0; i < ifile->nb_streams; i++) {
3816 ist = input_streams[ifile->ist_index + i];
3817 if (ist->decoding_needed) {
3818 ret = process_input_packet(ist, NULL, 0);
3823 /* mark all outputs that don't go through lavfi as finished */
3824 for (j = 0; j < nb_output_streams; j++) {
3825 OutputStream *ost = output_streams[j];
3827 if (ost->source_index == ifile->ist_index + i &&
3828 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3829 finish_output_stream(ost);
/* ...then report EAGAIN once so the caller re-evaluates remaining inputs. */
3833 ifile->eof_reached = 1;
3834 return AVERROR(EAGAIN);
3840 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3841 is->streams[pkt.stream_index]);
3843 /* the following test is needed in case new streams appear
3844 dynamically in stream : we ignore them */
3845 if (pkt.stream_index >= ifile->nb_streams) {
3846 report_new_stream(file_index, &pkt);
3847 goto discard_packet;
3850 ist = input_streams[ifile->ist_index + pkt.stream_index];
3852 ist->data_size += pkt.size;
3856 goto discard_packet;
3858 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3859 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* -debug_ts trace of raw demuxer timestamps before any adjustment. */
3864 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3865 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3866 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3867 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3868 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3869 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3870 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3871 av_ts2str(input_files[ist->file_index]->ts_offset),
3872 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wraparound correction for streams with < 64 pts_wrap_bits:
 * if a timestamp lies in the upper half of the wrap range, shift it down
 * by one full wrap period. */
3875 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3876 int64_t stime, stime2;
3877 // Correcting starttime based on the enabled streams
3878 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3879 // so we instead do it here as part of discontinuity handling
3880 if ( ist->next_dts == AV_NOPTS_VALUE
3881 && ifile->ts_offset == -is->start_time
3882 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3883 int64_t new_start_time = INT64_MAX;
3884 for (i=0; i<is->nb_streams; i++) {
3885 AVStream *st = is->streams[i];
3886 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3888 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3890 if (new_start_time > is->start_time) {
3891 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3892 ifile->ts_offset = -new_start_time;
3896 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3897 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3898 ist->wrap_correction_done = 1;
/* wrap_correction_done is cleared again if a timestamp was actually moved,
 * so later packets keep being checked. */
3900 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3901 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3902 ist->wrap_correction_done = 0;
3904 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3905 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3906 ist->wrap_correction_done = 0;
3910 /* add the stream-global side data to the first packet */
3911 if (ist->nb_packets == 1) {
3912 if (ist->st->nb_side_data)
3913 av_packet_split_side_data(&pkt);
3914 for (i = 0; i < ist->st->nb_side_data; i++) {
3915 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't overwrite side data the packet already carries, and drop the
 * display matrix when autorotation handles it instead. */
3918 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3920 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3923 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3927 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset (rescaled to the stream time base),
 * then the per-stream -itsscale factor. */
3931 if (pkt.dts != AV_NOPTS_VALUE)
3932 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3933 if (pkt.pts != AV_NOPTS_VALUE)
3934 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3936 if (pkt.pts != AV_NOPTS_VALUE)
3937 pkt.pts *= ist->ts_scale;
3938 if (pkt.dts != AV_NOPTS_VALUE)
3939 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check for formats flagged AVFMT_TS_DISCONT:
 * on the first packet of a stream, a large jump relative to the file's last
 * seen timestamp adjusts ts_offset and the current packet in one step. */
3941 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3942 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3943 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3944 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3945 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3946 int64_t delta = pkt_dts - ifile->last_ts;
3947 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3948 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3949 ifile->ts_offset -= delta;
3950 av_log(NULL, AV_LOG_DEBUG,
3951 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3952 delta, ifile->ts_offset);
3953 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3954 if (pkt.pts != AV_NOPTS_VALUE)
3955 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping, shift timestamps by the accumulated file duration so each
 * iteration continues monotonically, and track the pts range for the next
 * duration estimate in seek_to_start(). */
3959 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3960 if (pkt.pts != AV_NOPTS_VALUE) {
3961 pkt.pts += duration;
3962 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3963 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3966 if (pkt.dts != AV_NOPTS_VALUE)
3967 pkt.dts += duration;
/* Intra-stream discontinuity check against the decoder's predicted next
 * DTS; for non-DISCONT formats, wildly wrong timestamps are dropped
 * (set to AV_NOPTS_VALUE) instead of shifting the whole file. */
3969 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3970 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3971 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3972 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3974 int64_t delta = pkt_dts - ist->next_dts;
3975 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3976 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3977 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3978 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3979 ifile->ts_offset -= delta;
3980 av_log(NULL, AV_LOG_DEBUG,
3981 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3982 delta, ifile->ts_offset);
3983 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3984 if (pkt.pts != AV_NOPTS_VALUE)
3985 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3988 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3989 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3990 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3991 pkt.dts = AV_NOPTS_VALUE;
3993 if (pkt.pts != AV_NOPTS_VALUE){
3994 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3995 delta = pkt_pts - ist->next_dts;
3996 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3997 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3998 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3999 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last DTS seen in this file (in AV_TIME_BASE units) for the
 * inter-stream discontinuity check above. */
4005 if (pkt.dts != AV_NOPTS_VALUE)
4006 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* -debug_ts trace of the fully adjusted timestamps. */
4009 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4010 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4011 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4012 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4013 av_ts2str(input_files[ist->file_index]->ts_offset),
4014 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4017 sub2video_heartbeat(ist, pkt.pts);
4019 process_input_packet(ist, &pkt, 0);
4022 av_packet_unref(&pkt);
4028 * Perform a step of transcoding for the specified filter graph.
4030 * @param[in] graph filter graph to consider
4031 * @param[out] best_ist input stream where a frame would allow to continue
4032 * @return 0 for success, <0 for error
4034 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4037 int nb_requests, nb_requests_max = 0;
4038 InputFilter *ifilter;
4042 ret = avfilter_graph_request_oldest(graph->graph);
/* Success: the graph produced something — drain the sinks. */
4044 return reap_filters(0);
/* EOF: flush the sinks and close every output stream fed by this graph. */
4046 if (ret == AVERROR_EOF) {
4047 ret = reap_filters(1);
4048 for (i = 0; i < graph->nb_outputs; i++)
4049 close_output_stream(graph->outputs[i]->ost);
4052 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs more input.  Pick the input stream whose
 * buffersrc has failed the most frame requests, skipping inputs whose file
 * is stalled (eagain) or already at EOF. */
4055 for (i = 0; i < graph->nb_inputs; i++) {
4056 ifilter = graph->inputs[i];
4058 if (input_files[ist->file_index]->eagain ||
4059 input_files[ist->file_index]->eof_reached)
4061 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4062 if (nb_requests > nb_requests_max) {
4063 nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable so
 * choose_output() moves on. */
4069 for (i = 0; i < graph->nb_outputs; i++)
4070 graph->outputs[i]->ost->unavailable = 1;
4076 * Run a single step of transcoding.
4078 * @return 0 for success, <0 for error
4080 static int transcode_step(void)
4086 ost = choose_output();
4093 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Complex-graph outputs: ask the graph which input stream to feed next
 * (ist may legitimately come back NULL if the graph made progress). */
4098 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Simple/stream-copy outputs map 1:1 to a source input stream. */
4103 av_assert0(ost->source_index >= 0);
4104 ist = input_streams[ost->source_index];
4107 ret = process_input(ist->file_index);
4108 if (ret == AVERROR(EAGAIN)) {
4109 if (input_files[ist->file_index]->eagain)
4110 ost->unavailable = 1;
/* EOF from an input is not an error at this level; other errors propagate. */
4115 return ret == AVERROR_EOF ? 0 : ret;
4117 return reap_filters(0);
4121 * The following code is the main loop of the file converter
/* Drives the whole conversion: init, the per-iteration transcode_step()
 * loop with keyboard handling and progress reports, then flushing, trailer
 * writing and per-stream cleanup.  Returns 0 on success, <0 on error.
 * NOTE(review): several lines (error labels, some braces) are elided from
 * this listing. */
4123 static int transcode(void)
4126 AVFormatContext *os;
4129 int64_t timer_start;
4130 int64_t total_packets_written = 0;
4132 ret = transcode_init();
4136 if (stdin_interaction) {
4137 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4140 timer_start = av_gettime_relative();
4143 if ((ret = init_input_threads()) < 0)
/* Main loop: run until a fatal signal, no stream needs output, or a
 * non-EOF error escapes transcode_step(). */
4147 while (!received_sigterm) {
4148 int64_t cur_time= av_gettime_relative();
4150 /* if 'q' pressed, exits */
4151 if (stdin_interaction)
4152 if (check_keyboard_interaction(cur_time) < 0)
4155 /* check if there's any stream where output is still needed */
4156 if (!need_output()) {
4157 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4161 ret = transcode_step();
4162 if (ret < 0 && ret != AVERROR_EOF) {
4164 av_strerror(ret, errbuf, sizeof(errbuf));
4166 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4170 /* dump report by using the output first video and audio streams */
4171 print_report(0, timer_start, cur_time);
4174 free_input_threads();
4177 /* at the end of stream, we must flush the decoder buffers */
4178 for (i = 0; i < nb_input_streams; i++) {
4179 ist = input_streams[i];
4180 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4181 process_input_packet(ist, NULL, 0);
4188 /* write the trailer if needed and close file */
4189 for (i = 0; i < nb_output_files; i++) {
4190 os = output_files[i]->ctx;
4191 if ((ret = av_write_trailer(os)) < 0) {
4192 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4198 /* dump report by using the first video and audio streams */
4199 print_report(1, timer_start, av_gettime_relative());
4201 /* close each encoder */
4202 for (i = 0; i < nb_output_streams; i++) {
4203 ost = output_streams[i];
4204 if (ost->encoding_needed) {
4205 av_freep(&ost->enc_ctx->stats_in);
4207 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing at all was written. */
4210 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4211 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4215 /* close each decoder */
4216 for (i = 0; i < nb_input_streams; i++) {
4217 ist = input_streams[i];
4218 if (ist->decoding_needed) {
4219 avcodec_close(ist->dec_ctx);
4220 if (ist->hwaccel_uninit)
4221 ist->hwaccel_uninit(ist->dec_ctx);
4225 av_buffer_unref(&hw_device_ctx);
/* Cleanup path (reached on both success and error; label elided). */
4232 free_input_threads();
4235 if (output_streams) {
4236 for (i = 0; i < nb_output_streams; i++) {
4237 ost = output_streams[i];
4240 if (fclose(ost->logfile))
4241 av_log(NULL, AV_LOG_ERROR,
4242 "Error closing logfile, loss of information possible: %s\n",
4243 av_err2str(AVERROR(errno)));
4244 ost->logfile = NULL;
4246 av_freep(&ost->forced_kf_pts);
4247 av_freep(&ost->apad);
4248 av_freep(&ost->disposition);
4249 av_dict_free(&ost->encoder_opts);
4250 av_dict_free(&ost->sws_dict);
4251 av_dict_free(&ost->swr_opts);
4252 av_dict_free(&ost->resample_opts);
/* Return the process's user CPU time in microseconds, picking the best
 * available platform API: getrusage (POSIX), GetProcessTimes (Windows), or
 * falling back to wall-clock time via av_gettime_relative(). */
4260 static int64_t getutime(void)
4263 struct rusage rusage;
4265 getrusage(RUSAGE_SELF, &rusage);
4266 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4267 #elif HAVE_GETPROCESSTIMES
4269 FILETIME c, e, k, u;
4270 proc = GetCurrentProcess();
4271 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 for microseconds. */
4272 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4274 return av_gettime_relative();
/* Return the peak memory usage of the process in bytes, or an elided
 * fallback value when neither getrusage(ru_maxrss) nor
 * GetProcessMemoryInfo is available. */
4278 static int64_t getmaxrss(void)
4280 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4281 struct rusage rusage;
4282 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes. */
4283 return (int64_t)rusage.ru_maxrss * 1024;
4284 #elif HAVE_GETPROCESSMEMORYINFO
4286 PROCESS_MEMORY_COUNTERS memcounters;
4287 proc = GetCurrentProcess();
4288 memcounters.cb = sizeof(memcounters);
4289 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4290 return memcounters.PeakPagefileUsage;
/* No-op log callback; installed in main() for the undocumented "-d"
 * (daemon) mode to suppress all libav* logging. */
4296 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse options, open all inputs
 * and outputs, run transcode(), then report timing/benchmark data and exit
 * via exit_program(). */
4300 int main(int argc, char **argv)
4307 register_exit(ffmpeg_cleanup);
4309 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4311 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4312 parse_loglevel(argc, argv, options);
/* Undocumented "-d": run detached/daemon-like and silence logging. */
4314 if(argc>1 && !strcmp(argv[1], "-d")){
4316 av_log_set_callback(log_callback_null);
/* Global one-time registration of codecs, devices, filters and network. */
4321 avcodec_register_all();
4323 avdevice_register_all();
4325 avfilter_register_all();
4327 avformat_network_init();
4329 show_banner(argc, argv, options);
4333 /* parse options and open all input/output files */
4334 ret = ffmpeg_parse_options(argc, argv);
4338 if (nb_output_files <= 0 && nb_input_files == 0) {
4340 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4344 /* file converter / grab */
4345 if (nb_output_files <= 0) {
4346 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4350 // if (nb_input_files == 0) {
4351 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the conversion (user CPU time via getutime) for -benchmark. */
4355 current_time = ti = getutime();
4356 if (transcode() < 0)
4358 ti = getutime() - ti;
4360 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4362 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4363 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail when the decode-error ratio exceeds the limit. */
4364 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4367 exit_program(received_nb_signals ? 255 : main_return_code);
4368 return main_return_code;