2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file handle for -vstats; opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Names for the constants usable in -force_key_frames expressions.
   NOTE(review): the initializer list is truncated in this extract. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Runtime state counters. */
127 static int run_as_daemon  = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
/* Index 0/1: decode error statistics (see decode paths elsewhere in file). */
130 static int64_t decode_error_stat[2];
/* Timestamp snapshot used by update_benchmark(). */
132 static int current_time;
/* Destination for -progress output. */
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated in do_subtitle_out(). */
135 static uint8_t *subtitle_out;
/* Global tables of input/output streams and files, shared with ffmpeg_opt.c. */
137 InputStream **input_streams = NULL;
138 int        nb_input_streams = 0;
139 InputFile   **input_files   = NULL;
140 int        nb_input_files   = 0;
142 OutputStream **output_streams = NULL;
143 int         nb_output_streams = 0;
144 OutputFile   **output_files   = NULL;
145 int         nb_output_files   = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal settings, restored by term_exit_sigsafe() when restore_tty. */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
199 dst += r->y * dst_linesize + r->x * 4;
201 pal = (uint32_t *)r->data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->linesize[0];
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-handler communication flags; volatile because they are written
   from async signal context and read from the main loop. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
/* Set once transcode initialization completed; used by decode_interrupt_cb(). */
316 static volatile int transcode_init_done = 0;
/* Set when the process is about to exit; polled by the Windows CtrlHandler. */
317 static volatile int ffmpeg_exited = 0;
/* Exit status reported by main(); set to 1 on mux errors. */
318 static int main_return_code = 0;
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: maps console events to the POSIX-style
 * sigterm_handler() so both platforms share one shutdown path.
 *
 * @return TRUE when the event was handled, FALSE to let the OS default run.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            av_usleep(10000);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Poll stdin for one keypress. POSIX path uses select() with a zero
   timeout; the Windows path distinguishes a console (kbhit/getch) from a
   redirected pipe (PeekNamedPipe). Returns -1 when no key is available.
   NOTE(review): this extract is missing many lines of the function
   (declarations, #if branches, return statements) — do not rely on it
   compiling as shown. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode() fails on a non-console handle, i.e. a pipe. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown registered with register_exit(): frees filtergraphs,
   output files/streams, input files/streams, closes the vstats file and
   deinitializes the network layer, then reports the exit reason.
   NOTE(review): this extract is missing structural lines (braces, some
   conditionals such as the do_benchmark and debug_ts guards). */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- free every filtergraph, its inputs/outputs and the description --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- close and free output files (muxer contexts and their options) --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- free per-output-stream state (bitstream filter chain, frames,
       parser, expressions, dictionaries, encoder context) --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
/* walk the linked list of bitstream filters */
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
537 free_input_threads();
/* --- close and free input files and streams --- */
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
/* fclose() flushes; a failure here can mean lost vstats data */
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
/* report why we are exiting */
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
604 static void abort_codec_experimental(AVCodec *c, int encoder)
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final packet path to the muxer: copies extradata to the stream, applies
   max-frames limits, extracts quality side data, overrides CFR durations,
   runs bitstream filters, sanitizes DTS/PTS monotonicity, updates stats and
   calls av_interleaved_write_frame(). Takes ownership of pkt's payload.
   NOTE(review): many structural lines (braces, some guards such as the
   debug_ts and exit_on_error conditionals) are missing from this extract. */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* propagate encoder extradata to the (deprecated) stream codec context */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative async: discard timestamps entirely */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
/* pull encoder quality/PSNR stats out of packet side data */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* constant-frame-rate output: duration is dictated by the frame rate */
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
697 if (pkt->size == 0 && pkt->side_data_elems == 0)
/* a parser/bsf may have produced extradata; copy it to codecpar */
699 if (!ost->st->codecpar->extradata && avctx->extradata) {
700 ost->st->codecpar->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
701 if (!ost->st->codecpar->extradata) {
702 av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
705 ost->st->codecpar->extradata_size = avctx->extradata_size;
706 memcpy(ost->st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
/* --- timestamp sanitation for formats that carry timestamps --- */
709 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
710 if (pkt->dts != AV_NOPTS_VALUE &&
711 pkt->pts != AV_NOPTS_VALUE &&
712 pkt->dts > pkt->pts) {
713 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
715 ost->file_index, ost->st->index);
/* median of pts, dts, last_mux_dts+1 (sum minus min minus max) */
717 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
718 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
719 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing DTS */
722 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
723 pkt->dts != AV_NOPTS_VALUE &&
724 !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
725 ost->last_mux_dts != AV_NOPTS_VALUE) {
726 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
727 if (pkt->dts < max) {
728 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
729 av_log(s, loglevel, "Non-monotonous DTS in output stream "
730 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
731 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
733 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
736 av_log(s, loglevel, "changing to %"PRId64". This may result "
737 "in incorrect timestamps in the output file.\n",
739 if(pkt->pts >= pkt->dts)
740 pkt->pts = FFMAX(pkt->pts, max);
745 ost->last_mux_dts = pkt->dts;
/* bookkeeping for -stats / report output */
747 ost->data_size += pkt->size;
748 ost->packets_written++;
750 pkt->stream_index = ost->index;
753 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
754 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
755 av_get_media_type_string(ost->enc_ctx->codec_type),
756 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
757 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* on mux failure, mark every stream finished and remember the error */
762 ret = av_interleaved_write_frame(s, pkt);
764 print_error("av_interleaved_write_frame()", ret);
765 main_return_code = 1;
766 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
768 av_packet_unref(pkt);
771 static void close_output_stream(OutputStream *ost)
773 OutputFile *of = output_files[ost->file_index];
775 ost->finished |= ENCODER_FINISHED;
777 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
778 of->recording_time = FFMIN(of->recording_time, end);
782 static int check_recording_time(OutputStream *ost)
784 OutputFile *of = output_files[ost->file_index];
786 if (of->recording_time != INT64_MAX &&
787 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
788 AV_TIME_BASE_Q) >= 0) {
789 close_output_stream(ost);
795 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
798 AVCodecContext *enc = ost->enc_ctx;
802 av_init_packet(&pkt);
806 if (!check_recording_time(ost))
809 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
810 frame->pts = ost->sync_opts;
811 ost->sync_opts = frame->pts + frame->nb_samples;
812 ost->samples_encoded += frame->nb_samples;
813 ost->frames_encoded++;
815 av_assert0(pkt.size || !pkt.data);
816 update_benchmark(NULL);
818 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
819 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
820 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
821 enc->time_base.num, enc->time_base.den);
824 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
825 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
828 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
831 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
834 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
835 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
836 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
837 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
840 write_frame(s, &pkt, ost);
/* Encode an AVSubtitle and mux the result. DVB subtitles are emitted twice
   (draw packet + clear packet); timestamps are shifted by the output file's
   start time for -ss/-t handling. Fatal on missing pts or encode failure.
   NOTE(review): the function head (remaining parameters), the enc/pkt
   declarations, the nb computation and several guards are missing from
   this extract. */
844 static void do_subtitle_out(AVFormatContext *s,
/* fixed-size scratch buffer for the encoded subtitle payload */
849 int subtitle_out_max_size = 1024 * 1024;
850 int subtitle_out_size, nb, i;
855 if (sub->pts == AV_NOPTS_VALUE) {
856 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared subtitle_out buffer */
865 subtitle_out = av_malloc(subtitle_out_max_size);
867 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
872 /* Note: DVB subtitle need one packet to draw them and one other
873 packet to clear them */
874 /* XXX: signal it in the codec context ? */
875 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
880 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
882 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
883 pts -= output_files[ost->file_index]->start_time;
884 for (i = 0; i < nb; i++) {
885 unsigned save_num_rects = sub->num_rects;
887 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
888 if (!check_recording_time(ost))
892 // start_display_time is required to be 0
893 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
894 sub->end_display_time -= sub->start_display_time;
895 sub->start_display_time = 0;
899 ost->frames_encoded++;
901 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
902 subtitle_out_max_size, sub);
/* restore rect count possibly zeroed for the DVB clear packet */
904 sub->num_rects = save_num_rects;
905 if (subtitle_out_size < 0) {
906 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* wrap the encoded payload in a packet stamped in muxer time base */
910 av_init_packet(&pkt);
911 pkt.data = subtitle_out;
912 pkt.size = subtitle_out_size;
913 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
914 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
915 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
916 /* XXX: the pts correction is handled here. Maybe handling
917 it in the codec would be better */
/* 90 kHz ticks: draw packet at start, clear packet at end */
919 pkt.pts += 90 * sub->start_display_time;
921 pkt.pts += 90 * sub->end_display_time;
924 write_frame(s, &pkt, ost);
/* Core video output path: decides, per vsync mode, how many times to emit
   (duplicate) or drop the incoming filtered frame, handles forced
   keyframes, encodes via avcodec_encode_video2() and muxes the packets.
   NOTE(review): this extract is missing structural lines throughout
   (braces, several declarations such as sync_ipts/duration/pkt, case
   labels, and the #else of the RAWPICTURE branch). */
928 static void do_video_out(AVFormatContext *s,
930 AVFrame *next_picture,
933 int ret, format_video_sync;
935 AVCodecContext *enc = ost->enc_ctx;
936 AVCodecContext *mux_enc = ost->st->codec;
937 int nb_frames, nb0_frames, i;
938 double delta, delta0;
941 InputStream *ist = NULL;
942 AVFilterContext *filter = ost->filter->filter;
944 if (ost->source_index >= 0)
945 ist = input_streams[ost->source_index];
/* derive the nominal frame duration (in encoder ticks) from the filter
   output frame rate, the output frame rate, or the input packet duration */
947 if (filter->inputs[0]->frame_rate.num > 0 &&
948 filter->inputs[0]->frame_rate.den > 0)
949 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
951 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
952 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
954 if (!ost->filters_script &&
958 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
959 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path (no next_picture): predict dup count from recent history */
964 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
965 ost->last_nb0_frames[1],
966 ost->last_nb0_frames[2]);
968 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
969 delta = delta0 + duration;
971 /* by default, we output a single frame */
972 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve -vsync auto to a concrete mode based on the muxer */
975 format_video_sync = video_sync_method;
976 if (format_video_sync == VSYNC_AUTO) {
977 if(!strcmp(s->oformat->name, "avi")) {
978 format_video_sync = VSYNC_VFR;
980 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
982 && format_video_sync == VSYNC_CFR
983 && input_files[ist->file_index]->ctx->nb_streams == 1
984 && input_files[ist->file_index]->input_ts_offset == 0) {
985 format_video_sync = VSYNC_VSCFR;
987 if (format_video_sync == VSYNC_CFR && copy_ts) {
988 format_video_sync = VSYNC_VSCFR;
991 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* frames arriving in the past: warn/clip unless passthrough/drop */
995 format_video_sync != VSYNC_PASSTHROUGH &&
996 format_video_sync != VSYNC_DROP) {
998 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1000 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1001 sync_ipts = ost->sync_opts;
/* per-mode duplication/drop decision */
1006 switch (format_video_sync) {
1008 if (ost->frame_number == 0 && delta0 >= 0.5) {
1009 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1012 ost->sync_opts = lrint(sync_ipts);
1015 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1016 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1018 } else if (delta < -1.1)
1020 else if (delta > 1.1) {
1021 nb_frames = lrintf(delta);
1023 nb0_frames = lrintf(delta0 - 0.6);
1029 else if (delta > 0.6)
1030 ost->sync_opts = lrint(sync_ipts);
1033 case VSYNC_PASSTHROUGH:
1034 ost->sync_opts = lrint(sync_ipts);
/* clamp to -frames limit and record dup history */
1041 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1042 nb0_frames = FFMIN(nb0_frames, nb_frames);
1044 memmove(ost->last_nb0_frames + 1,
1045 ost->last_nb0_frames,
1046 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1047 ost->last_nb0_frames[0] = nb0_frames;
1049 if (nb0_frames == 0 && ost->last_dropped) {
1051 av_log(NULL, AV_LOG_VERBOSE,
1052 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1053 ost->frame_number, ost->st->index, ost->last_frame->pts);
1055 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1056 if (nb_frames > dts_error_threshold * 30) {
1057 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1061 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1062 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1064 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1066 /* duplicates frame if needed */
1067 for (i = 0; i < nb_frames; i++) {
1068 AVFrame *in_picture;
1069 av_init_packet(&pkt);
/* the first nb0_frames iterations re-emit the previous frame */
1073 if (i < nb0_frames && ost->last_frame) {
1074 in_picture = ost->last_frame;
1076 in_picture = next_picture;
1081 in_picture->pts = ost->sync_opts;
1084 if (!check_recording_time(ost))
1086 if (ost->frame_number >= ost->max_frames)
/* deprecated raw-picture shortcut: mux the AVPicture without encoding */
1090 #if FF_API_LAVF_FMT_RAWPICTURE
1091 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1092 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1093 /* raw pictures are written as AVPicture structure to
1094 avoid any copies. We support temporarily the older
1096 if (in_picture->interlaced_frame)
1097 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1099 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1100 pkt.data = (uint8_t *)in_picture;
1101 pkt.size = sizeof(AVPicture);
1102 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1103 pkt.flags |= AV_PKT_FLAG_KEY;
1105 write_frame(s, &pkt, ost);
/* normal encode path */
1109 int got_packet, forced_keyframe = 0;
1112 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1113 ost->top_field_first >= 0)
1114 in_picture->top_field_first = !!ost->top_field_first;
1116 if (in_picture->interlaced_frame) {
1117 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1118 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1120 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1122 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124 in_picture->quality = enc->global_quality;
1125 in_picture->pict_type = 0;
/* forced keyframes: explicit pts list, expression, or "source" */
1127 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1128 in_picture->pts * av_q2d(enc->time_base) : NAN;
1129 if (ost->forced_kf_index < ost->forced_kf_count &&
1130 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1131 ost->forced_kf_index++;
1132 forced_keyframe = 1;
1133 } else if (ost->forced_keyframes_pexpr) {
1135 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1136 res = av_expr_eval(ost->forced_keyframes_pexpr,
1137 ost->forced_keyframes_expr_const_values, NULL);
1138 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1139 ost->forced_keyframes_expr_const_values[FKF_N],
1140 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1141 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1142 ost->forced_keyframes_expr_const_values[FKF_T],
1143 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1146 forced_keyframe = 1;
1147 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1148 ost->forced_keyframes_expr_const_values[FKF_N];
1149 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1150 ost->forced_keyframes_expr_const_values[FKF_T];
1151 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1154 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1155 } else if ( ost->forced_keyframes
1156 && !strncmp(ost->forced_keyframes, "source", 6)
1157 && in_picture->key_frame==1) {
1158 forced_keyframe = 1;
1161 if (forced_keyframe) {
1162 in_picture->pict_type = AV_PICTURE_TYPE_I;
1163 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1166 update_benchmark(NULL);
1168 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1169 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1170 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1171 enc->time_base.num, enc->time_base.den);
1174 ost->frames_encoded++;
1176 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1177 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1179 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1185 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1188 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1191 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1192 pkt.pts = ost->sync_opts;
1194 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1197 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1198 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1199 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1200 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1203 frame_size = pkt.size;
1204 write_frame(s, &pkt, ost);
1206 /* if two pass, output log */
1207 if (ost->logfile && enc->stats_out) {
1208 fprintf(ost->logfile, "%s", enc->stats_out);
1214 * For video, number of frames in == number of packets out.
1215 * But there may be reordering, so we can't throw away frames on encoder
1216 * flush, we need to limit them here, before they go into encoder.
1218 ost->frame_number++;
1220 if (vstats_filename && frame_size)
1221 do_video_stats(ost, frame_size);
/* keep a reference to the frame for VFR duplication next call */
1224 if (!ost->last_frame)
1225 ost->last_frame = av_frame_alloc();
1226 av_frame_unref(ost->last_frame);
1227 if (next_picture && ost->last_frame)
1228 av_frame_ref(ost->last_frame, next_picture);
1230 av_frame_free(&ost->last_frame);
1233 static double psnr(double d)
1235 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quantizer, PSNR, size, bitrate)
   to the -vstats file, opening it lazily on first call.
   NOTE(review): the extract is missing the fopen() error handling, the
   `enc = ost->enc_ctx` assignment, the frame_number declaration and
   several braces. */
1238 static void do_video_stats(OutputStream *ost, int frame_size)
1240 AVCodecContext *enc;
1242 double ti1, bitrate, avg_bitrate;
1244 /* this is executed just the first time do_video_stats is called */
1246 vstats_file = fopen(vstats_filename, "w");
1254 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1255 frame_number = ost->st->nb_frames;
/* quality is stored in lambda units; convert back to qscale */
1256 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1257 ost->quality / (float)FF_QP2LAMBDA);
1259 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1260 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1262 fprintf(vstats_file,"f_size= %6d ", frame_size);
1263 /* compute pts value */
1264 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame, average from totals */
1268 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1269 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1270 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1271 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1272 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark an output stream — and every other stream of the same output file —
 * as completely finished (both the encoder and the muxer side), so no more
 * data is produced for that file.
 */
1276 static void finish_output_stream(OutputStream *ost)
1278 OutputFile *of = output_files[ost->file_index];
1281 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* propagate the finished state to all sibling streams of this file */
1284 for (i = 0; i < of->ctx->nb_streams; i++)
1285 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1290 * Get and encode new output from any of the filtergraphs, without causing
1293 * @return 0 for success, <0 for severe errors
1295 static int reap_filters(int flush)
1297 AVFrame *filtered_frame = NULL;
1300 /* Reap all buffers present in the buffer sinks */
1301 for (i = 0; i < nb_output_streams; i++) {
1302 OutputStream *ost = output_streams[i];
1303 OutputFile *of = output_files[ost->file_index];
1304 AVFilterContext *filter;
1305 AVCodecContext *enc = ost->enc_ctx;
1310 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to pull from the sink */
1312 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1313 return AVERROR(ENOMEM);
1315 filtered_frame = ost->filtered_frame;
1318 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: do not request more frames from upstream */
1319 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1320 AV_BUFFERSINK_FLAG_NO_REQUEST);
1322 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1323 av_log(NULL, AV_LOG_WARNING,
1324 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1325 } else if (flush && ret == AVERROR_EOF) {
/* at EOF during a flush, emit a NULL frame so video fps logic can drain */
1326 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1327 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
/* drop frames for streams that are already finished */
1331 if (ost->finished) {
1332 av_frame_unref(filtered_frame);
1335 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1336 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1337 AVRational tb = enc->time_base;
/* widen the time base denominator to get extra precision for float_pts
 * without overflowing the rescale (cap shift so tb.den stays < 2^30) */
1338 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1340 tb.den <<= extra_bits;
1342 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1343 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1344 float_pts /= 1 << extra_bits;
1345 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1346 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* the frame itself keeps an integer pts in the encoder time base */
1348 filtered_frame->pts =
1349 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1350 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1352 //if (ost->source_index >= 0)
1353 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame to the appropriate per-media-type encoder path */
1355 switch (filter->inputs[0]->type) {
1356 case AVMEDIA_TYPE_VIDEO:
/* unless the user forced an aspect ratio, take it from the filter output */
1357 if (!ost->frame_aspect_ratio.num)
1358 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1361 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1362 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1364 enc->time_base.num, enc->time_base.den);
1367 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1369 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized channel counts unless the encoder can adapt */
1370 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1371 enc->channels != av_frame_get_channels(filtered_frame)) {
1372 av_log(NULL, AV_LOG_ERROR,
1373 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1376 do_audio_out(of->ctx, ost, filtered_frame);
1379 // TODO support subtitle filters
1383 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals and muxing overhead at
 * INFO level, then detailed per-input/per-output stream statistics at
 * VERBOSE level. total_size is the byte size of the (first) output file.
 */
1390 static void print_final_stats(int64_t total_size)
1392 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1393 uint64_t subtitle_size = 0;
1394 uint64_t data_size = 0;
/* -1 means "unknown"; stays negative when the overhead cannot be computed */
1395 float percent = -1.0;
/* accumulate per-media-type payload sizes over all output streams */
1399 for (i = 0; i < nb_output_streams; i++) {
1400 OutputStream *ost = output_streams[i];
1401 switch (ost->enc_ctx->codec_type) {
1402 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1403 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1404 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1405 default: other_size += ost->data_size; break;
1407 extra_size += ost->enc_ctx->extradata_size;
1408 data_size += ost->data_size;
/* NOTE(review): this mixes the new AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 name — presumably AV_CODEC_FLAG_PASS2 was intended;
 * confirm the two constants share the same value before relying on this. */
1409 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1410 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payloads */
1414 if (data_size && total_size>0 && total_size >= data_size)
1415 percent = 100.0 * (total_size - data_size) / data_size;
1417 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1418 video_size / 1024.0,
1419 audio_size / 1024.0,
1420 subtitle_size / 1024.0,
1421 other_size / 1024.0,
1422 extra_size / 1024.0);
1424 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1426 av_log(NULL, AV_LOG_INFO, "unknown");
1427 av_log(NULL, AV_LOG_INFO, "\n");
1429 /* print verbose per-stream stats */
1430 for (i = 0; i < nb_input_files; i++) {
1431 InputFile *f = input_files[i];
/* note: these locals shadow the total_size parameter on purpose —
 * they are per-file demux totals */
1432 uint64_t total_packets = 0, total_size = 0;
1434 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1435 i, f->ctx->filename);
1437 for (j = 0; j < f->nb_streams; j++) {
1438 InputStream *ist = input_streams[f->ist_index + j];
1439 enum AVMediaType type = ist->dec_ctx->codec_type;
1441 total_size += ist->data_size;
1442 total_packets += ist->nb_packets;
1444 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1445 i, j, media_type_string(type));
1446 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1447 ist->nb_packets, ist->data_size);
1449 if (ist->decoding_needed) {
1450 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1451 ist->frames_decoded);
1452 if (type == AVMEDIA_TYPE_AUDIO)
1453 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1454 av_log(NULL, AV_LOG_VERBOSE, "; ");
1457 av_log(NULL, AV_LOG_VERBOSE, "\n");
1460 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1461 total_packets, total_size);
/* symmetric report for the mux side of every output file */
1464 for (i = 0; i < nb_output_files; i++) {
1465 OutputFile *of = output_files[i];
1466 uint64_t total_packets = 0, total_size = 0;
1468 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1469 i, of->ctx->filename);
1471 for (j = 0; j < of->ctx->nb_streams; j++) {
1472 OutputStream *ost = output_streams[of->ost_index + j];
1473 enum AVMediaType type = ost->enc_ctx->codec_type;
1475 total_size += ost->data_size;
1476 total_packets += ost->packets_written;
1478 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1479 i, j, media_type_string(type));
1480 if (ost->encoding_needed) {
1481 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1482 ost->frames_encoded);
1483 if (type == AVMEDIA_TYPE_AUDIO)
1484 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1485 av_log(NULL, AV_LOG_VERBOSE, "; ");
1488 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1489 ost->packets_written, ost->data_size);
1491 av_log(NULL, AV_LOG_VERBOSE, "\n");
1494 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1495 total_packets, total_size);
/* warn the user when nothing at all was written */
1497 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1498 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1500 av_log(NULL, AV_LOG_WARNING, "\n");
1502 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Print the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * stderr and, when -progress is active, emit machine-readable key=value
 * progress records to progress_avio. Called repeatedly during transcoding
 * and once more with is_last_report set at the end of the run.
 */
1507 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1510 AVBPrint buf_script;
1512 AVFormatContext *oc;
1514 AVCodecContext *enc;
1515 int frame_number, vid, i;
1518 int64_t pts = INT64_MIN + 1;
/* statics: persist the report throttle timestamp and QP histogram between calls */
1519 static int64_t last_time = -1;
1520 static int64_t qp_histogram[52];
1521 int hours, mins, secs, us;
/* nothing to do when no human or machine consumer wants the report */
1525 if (!print_stats && !is_last_report && !progress_avio)
/* throttle intermediate reports to at most one every 500 ms */
1528 if (!is_last_report) {
1529 if (last_time == -1) {
1530 last_time = cur_time;
1533 if ((cur_time - last_time) < 500000)
1535 last_time = cur_time;
/* wall-clock seconds since transcoding started */
1538 t = (cur_time-timer_start) / 1000000.0;
1541 oc = output_files[0]->ctx;
1543 total_size = avio_size(oc->pb);
1544 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1545 total_size = avio_tell(oc->pb);
1549 av_bprint_init(&buf_script, 0, 1);
1550 for (i = 0; i < nb_output_streams; i++) {
1552 ost = output_streams[i];
1554 if (!ost->stream_copy)
1555 q = ost->quality / (float) FF_QP2LAMBDA;
/* secondary video streams only contribute their quality figure */
1557 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1558 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1559 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560 ost->file_index, ost->index, q);
/* the first video stream drives the frame/fps columns */
1562 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1565 frame_number = ost->frame_number;
1566 fps = t > 1 ? frame_number / t : 0;
/* one decimal place for fps only while it is still below ~10 */
1567 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1568 frame_number, fps < 9.95, fps, q);
1569 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1570 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1571 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1572 ost->file_index, ost->index, q);
1574 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* with -qphist, accumulate and print a log2-scaled QP histogram */
1578 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1580 for (j = 0; j < 32; j++)
1581 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* per-plane and combined PSNR when the encoder computed error values */
1584 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1586 double error, error_sum = 0;
1587 double scale, scale_sum = 0;
1589 char type[3] = { 'Y','U','V' };
1590 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1591 for (j = 0; j < 3; j++) {
/* final report averages over all frames; periodic ones use the last frame */
1592 if (is_last_report) {
1593 error = enc->error[j];
1594 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1596 error = ost->error[j];
1597 scale = enc->width * enc->height * 255.0 * 255.0;
1603 p = psnr(error / scale);
1604 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* "| 32" lowercases the plane letter for the script key */
1605 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1606 ost->file_index, ost->index, type[j] | 32, p);
1608 p = psnr(error_sum / scale_sum);
1609 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1610 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1611 ost->file_index, ost->index, p);
1615 /* compute min output value */
/* track the furthest end timestamp over all streams, in AV_TIME_BASE units */
1616 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1617 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1618 ost->st->time_base, AV_TIME_BASE_Q));
1620 nb_frames_drop += ost->last_dropped;
/* split |pts| into whole seconds and microsecond remainder for display */
1623 secs = FFABS(pts) / AV_TIME_BASE;
1624 us = FFABS(pts) % AV_TIME_BASE;
/* overall bitrate and realtime speed factor; -1 marks "not available" */
1630 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1631 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1633 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1635 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1636 "size=%8.0fkB time=", total_size / 1024.0);
1638 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1639 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1640 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1641 (100 * us) / AV_TIME_BASE);
1644 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1645 av_bprintf(&buf_script, "bitrate=N/A\n");
1647 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1648 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1651 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1652 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1653 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1654 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1655 hours, mins, secs, us);
/* duplicated/dropped frame counters (frame-rate conversion bookkeeping) */
1657 if (nb_frames_dup || nb_frames_drop)
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1659 nb_frames_dup, nb_frames_drop);
1660 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1661 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1664 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1665 av_bprintf(&buf_script, "speed=N/A\n");
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1668 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* human-readable line: '\r' keeps rewriting the same terminal line until
 * the final report, which ends with '\n' */
1671 if (print_stats || is_last_report) {
1672 const char end = is_last_report ? '\n' : '\r';
1673 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1674 fprintf(stderr, "%s %c", buf, end);
1676 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* machine-readable record for the -progress consumer */
1681 if (progress_avio) {
1682 av_bprintf(&buf_script, "progress=%s\n",
1683 is_last_report ? "end" : "continue");
1684 avio_write(progress_avio, buf_script.str,
1685 FFMIN(buf_script.len, buf_script.size - 1));
1686 avio_flush(progress_avio);
1687 av_bprint_finalize(&buf_script, NULL);
1688 if (is_last_report) {
1689 if ((ret = avio_closep(&progress_avio)) < 0)
1690 av_log(NULL, AV_LOG_ERROR,
1691 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1696 print_final_stats(total_size);
/*
 * Drain every active encoder at end of stream: repeatedly call the encoder
 * with a NULL frame until it stops producing packets, writing each delayed
 * packet to its muxer.
 */
1699 static void flush_encoders(void)
1703 for (i = 0; i < nb_output_streams; i++) {
1704 OutputStream *ost = output_streams[i];
1705 AVCodecContext *enc = ost->enc_ctx;
1706 AVFormatContext *os = output_files[ost->file_index]->ctx;
1707 int stop_encoding = 0;
/* stream-copy outputs have no encoder to flush */
1709 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders (frame_size <= 1) buffer nothing */
1712 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1714 #if FF_API_LAVF_FMT_RAWPICTURE
/* rawvideo passed through as AVPicture never enters an encoder */
1715 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the media-type-specific encode entry point */
1720 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1723 switch (enc->codec_type) {
1724 case AVMEDIA_TYPE_AUDIO:
1725 encode = avcodec_encode_audio2;
1728 case AVMEDIA_TYPE_VIDEO:
1729 encode = avcodec_encode_video2;
1740 av_init_packet(&pkt);
1744 update_benchmark(NULL);
/* NULL frame signals the encoder to emit its buffered output */
1745 ret = encode(enc, &pkt, NULL, &got_packet);
1746 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1748 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: keep appending encoder stats to the pass log */
1753 if (ost->logfile && enc->stats_out) {
1754 fprintf(ost->logfile, "%s", enc->stats_out);
/* muxer already closed for this stream: discard the packet */
1760 if (ost->finished & MUXER_FINISHED) {
1761 av_packet_unref(&pkt);
1764 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1765 pkt_size = pkt.size;
1766 write_frame(os, &pkt, ost);
1767 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1768 do_video_stats(ost, pkt_size);
1779 * Check whether a packet from ist should be written into ost at this time
1781 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1783 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files */
1784 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* the output stream must be fed from exactly this input stream */
1786 if (ost->source_index != ist_index)
/* drop data dated before the output file's requested start time */
1792 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale its timestamps into the output stream time base, apply start-time
 * offsets and recording-time limits, optionally run the parser/raw-picture
 * compatibility paths, then hand the packet to the muxer.
 */
1798 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1800 OutputFile *of = output_files[ost->file_index];
1801 InputFile *f = input_files [ist->file_index];
1802 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1803 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1807 av_init_packet(&opkt);
/* by default a copied stream must begin on a keyframe */
1809 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1810 !ost->copy_initial_nonkeyframes)
/* unless -copypriorss, drop packets dated before the effective start */
1813 if (!ost->frame_number && !ost->copy_prior_start) {
1814 int64_t comp_start = start_time;
1815 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1816 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1817 if (pkt->pts == AV_NOPTS_VALUE ?
1818 ist->pts < comp_start :
1819 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop this output stream once the output recording time (-t) is reached */
1823 if (of->recording_time != INT64_MAX &&
1824 ist->pts >= of->recording_time + start_time) {
1825 close_output_stream(ost);
/* same check against the input-side recording limit */
1829 if (f->recording_time != INT64_MAX) {
1830 start_time = f->ctx->start_time;
1831 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1832 start_time += f->start_time;
1833 if (ist->pts >= f->recording_time + start_time) {
1834 close_output_stream(ost);
1839 /* force the input stream PTS */
1840 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts from the input to the output stream time base,
 * shifted so the output starts at zero */
1843 if (pkt->pts != AV_NOPTS_VALUE)
1844 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1846 opkt.pts = AV_NOPTS_VALUE;
1848 if (pkt->dts == AV_NOPTS_VALUE)
1849 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1851 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1852 opkt.dts -= ost_tb_start_time;
/* audio: rescale via sample counts to avoid accumulating rounding drift */
1854 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1855 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1857 duration = ist->dec_ctx->frame_size;
1858 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1859 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1860 ost->st->time_base) - ost_tb_start_time;
1863 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1864 opkt.flags = pkt->flags;
1865 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
/* codecs whose bitstream does not need parser-level repackaging are listed */
1866 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1867 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1868 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1869 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1871 int ret = av_parser_change(ost->parser, ost->st->codec,
1872 &opkt.data, &opkt.size,
1873 pkt->data, pkt->size,
1874 pkt->flags & AV_PKT_FLAG_KEY);
1876 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-produced buffer so the packet owns its data */
1881 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
/* no repackaging needed: reference the input packet's payload directly */
1886 opkt.data = pkt->data;
1887 opkt.size = pkt->size;
1889 av_copy_packet_side_data(&opkt, pkt);
1891 #if FF_API_LAVF_FMT_RAWPICTURE
1892 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1893 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1894 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1895 /* store AVPicture in AVPacket, as expected by the output format */
1896 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1898 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1902 opkt.data = (uint8_t *)&pict;
1903 opkt.size = sizeof(AVPicture);
1904 opkt.flags |= AV_PKT_FLAG_KEY;
1908 write_frame(of->ctx, &opkt, ost);
/*
 * If the input decoder has no channel layout set, derive the default layout
 * for its channel count (bounded by -guess_layout_max) and log the guess.
 * Leaves an already-set layout untouched.
 */
1911 int guess_input_channel_layout(InputStream *ist)
1913 AVCodecContext *dec = ist->dec_ctx;
1915 if (!dec->channel_layout) {
1916 char layout_name[256];
/* refuse to guess beyond the user-configured channel-count limit */
1918 if (dec->channels > ist->guess_layout_max)
1920 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1921 if (!dec->channel_layout)
1923 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1924 dec->channels, dec->channel_layout);
1925 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1926 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record the outcome of a decode call in the success/failure counters and,
 * with -xerror, abort on decode errors or on frames the decoder flagged as
 * corrupt. ist may be NULL (e.g. for subtitles) — then only the counters
 * and the error check apply.
 */
1931 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
1933 if (*got_output || ret<0)
1934 decode_error_stat[ret<0] ++;
1936 if (ret < 0 && exit_on_error)
1939 if (exit_on_error && *got_output && ist) {
1940 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1941 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Decode one audio packet: run the decoder, maintain the input stream's
 * timestamp bookkeeping, reconfigure filtergraphs when the sample format /
 * rate / channel layout changes mid-stream, and push the decoded frame into
 * every attached filter input. Returns a negative error code on failure.
 */
1947 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1949 AVFrame *decoded_frame, *f;
1950 AVCodecContext *avctx = ist->dec_ctx;
1951 int i, ret, err = 0, resample_changed;
1952 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode and filter-feed frames */
1954 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1955 return AVERROR(ENOMEM);
1956 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1957 return AVERROR(ENOMEM);
1958 decoded_frame = ist->decoded_frame;
1960 update_benchmark(NULL);
1961 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1962 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a non-positive sample rate would poison all the duration math below */
1964 if (ret >= 0 && avctx->sample_rate <= 0) {
1965 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1966 ret = AVERROR_INVALIDDATA;
1969 check_decode_result(ist, got_output, ret);
1971 if (!*got_output || ret < 0)
1974 ist->samples_decoded += decoded_frame->nb_samples;
1975 ist->frames_decoded++;
1978 /* increment next_dts to use for the case where the input stream does not
1979 have timestamps or there are multiple frames in the packet */
1980 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1982 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream changes of the audio parameters the filters were
 * configured for */
1986 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1987 ist->resample_channels != avctx->channels ||
1988 ist->resample_channel_layout != decoded_frame->channel_layout ||
1989 ist->resample_sample_rate != decoded_frame->sample_rate;
1990 if (resample_changed) {
1991 char layout1[64], layout2[64];
1993 if (!guess_input_channel_layout(ist)) {
1994 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1995 "layout for Input Stream #%d.%d\n", ist->file_index,
1999 decoded_frame->channel_layout = avctx->channel_layout;
2001 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2002 ist->resample_channel_layout);
2003 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2004 decoded_frame->channel_layout);
2006 av_log(NULL, AV_LOG_INFO,
2007 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2008 ist->file_index, ist->st->index,
2009 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2010 ist->resample_channels, layout1,
2011 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2012 avctx->channels, layout2);
/* remember the new parameters, then rebuild every graph fed by ist */
2014 ist->resample_sample_fmt = decoded_frame->format;
2015 ist->resample_sample_rate = decoded_frame->sample_rate;
2016 ist->resample_channel_layout = decoded_frame->channel_layout;
2017 ist->resample_channels = avctx->channels;
2019 for (i = 0; i < nb_filtergraphs; i++)
2020 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2021 FilterGraph *fg = filtergraphs[i];
2022 if (configure_filtergraph(fg) < 0) {
2023 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2029 /* if the decoder provides a pts, use it instead of the last packet pts.
2030 the decoder could be delaying output by a packet or more. */
2031 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2032 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2033 decoded_frame_tb = avctx->time_base;
2034 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2035 decoded_frame->pts = decoded_frame->pkt_pts;
2036 decoded_frame_tb = ist->st->time_base;
2037 } else if (pkt->pts != AV_NOPTS_VALUE) {
2038 decoded_frame->pts = pkt->pts;
2039 decoded_frame_tb = ist->st->time_base;
/* last resort: fall back to the stream's current dts */
2041 decoded_frame->pts = ist->dts;
2042 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so later frames from this packet don't reuse it */
2044 pkt->pts = AV_NOPTS_VALUE;
2045 if (decoded_frame->pts != AV_NOPTS_VALUE)
2046 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2047 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2048 (AVRational){1, avctx->sample_rate});
2049 ist->nb_samples = decoded_frame->nb_samples;
/* feed all filter inputs; all but the last get a ref, the last consumes
 * decoded_frame itself */
2050 for (i = 0; i < ist->nb_filters; i++) {
2051 if (i < ist->nb_filters - 1) {
2052 f = ist->filter_frame;
2053 err = av_frame_ref(f, decoded_frame);
2058 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2059 AV_BUFFERSRC_FLAG_PUSH);
2060 if (err == AVERROR_EOF)
2061 err = 0; /* ignore */
2065 decoded_frame->pts = AV_NOPTS_VALUE;
2067 av_frame_unref(ist->filter_frame);
2068 av_frame_unref(decoded_frame);
2069 return err < 0 ? err : ret;
/*
 * Decode one video packet: run the decoder, pick the best-effort timestamp,
 * reconfigure filtergraphs on a mid-stream size/format change, and push the
 * decoded frame into every attached filter input. Returns a negative error
 * code on failure.
 */
2072 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2074 AVFrame *decoded_frame, *f;
2075 int i, ret = 0, err = 0, resample_changed;
2076 int64_t best_effort_timestamp;
2077 AVRational *frame_sample_aspect;
/* lazily allocate the reusable decode and filter-feed frames */
2079 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2080 return AVERROR(ENOMEM);
2081 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2082 return AVERROR(ENOMEM);
2083 decoded_frame = ist->decoded_frame;
/* feed the stream's tracked dts to the decoder for timestamp recovery */
2084 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2086 update_benchmark(NULL);
2087 ret = avcodec_decode_video2(ist->dec_ctx,
2088 decoded_frame, got_output, pkt);
2089 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2091 // The following line may be required in some cases where there is no parser
2092 // or the parser does not has_b_frames correctly
2093 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2094 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2095 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2097 av_log(ist->dec_ctx, AV_LOG_WARNING,
2098 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2099 "If you want to help, upload a sample "
2100 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2101 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2102 ist->dec_ctx->has_b_frames,
2103 ist->st->codec->has_b_frames);
2106 check_decode_result(ist, got_output, ret);
/* sanity check: the decoded frame should match the context's parameters */
2108 if (*got_output && ret >= 0) {
2109 if (ist->dec_ctx->width != decoded_frame->width ||
2110 ist->dec_ctx->height != decoded_frame->height ||
2111 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2112 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2113 decoded_frame->width,
2114 decoded_frame->height,
2115 decoded_frame->format,
2116 ist->dec_ctx->width,
2117 ist->dec_ctx->height,
2118 ist->dec_ctx->pix_fmt);
2122 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top option) */
2125 if(ist->top_field_first>=0)
2126 decoded_frame->top_field_first = ist->top_field_first;
2128 ist->frames_decoded++;
/* copy hw-decoded surface data back to a system-memory frame if needed */
2130 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2131 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2135 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2137 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2138 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2139 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2141 if (ts != AV_NOPTS_VALUE)
2142 ist->next_pts = ist->pts = ts;
2146 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2147 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2148 ist->st->index, av_ts2str(decoded_frame->pts),
2149 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2150 best_effort_timestamp,
2151 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2152 decoded_frame->key_frame, decoded_frame->pict_type,
2153 ist->st->time_base.num, ist->st->time_base.den);
/* container-level aspect ratio overrides the codec-level one */
2158 if (ist->st->sample_aspect_ratio.num)
2159 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect a mid-stream change of frame size or pixel format */
2161 resample_changed = ist->resample_width != decoded_frame->width ||
2162 ist->resample_height != decoded_frame->height ||
2163 ist->resample_pix_fmt != decoded_frame->format;
2164 if (resample_changed) {
2165 av_log(NULL, AV_LOG_INFO,
2166 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2167 ist->file_index, ist->st->index,
2168 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2169 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2171 ist->resample_width = decoded_frame->width;
2172 ist->resample_height = decoded_frame->height;
2173 ist->resample_pix_fmt = decoded_frame->format;
/* rebuild graphs fed by this stream, unless reinit is disabled */
2175 for (i = 0; i < nb_filtergraphs; i++) {
2176 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2177 configure_filtergraph(filtergraphs[i]) < 0) {
2178 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2184 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* feed all filter inputs; all but the last get a ref, the last consumes
 * decoded_frame itself */
2185 for (i = 0; i < ist->nb_filters; i++) {
2186 if (!frame_sample_aspect->num)
2187 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2189 if (i < ist->nb_filters - 1) {
2190 f = ist->filter_frame;
2191 err = av_frame_ref(f, decoded_frame);
2196 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2197 if (ret == AVERROR_EOF) {
2198 ret = 0; /* ignore */
2199 } else if (ret < 0) {
2200 av_log(NULL, AV_LOG_FATAL,
2201 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2207 av_frame_unref(ist->filter_frame);
2208 av_frame_unref(decoded_frame);
2209 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix overlapping display durations
 * (-fix_sub_duration, which delays output by one subtitle), update the
 * sub2video rendering state, and encode the subtitle into every matching
 * output stream.
 */
2212 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2214 AVSubtitle subtitle;
2215 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2216 &subtitle, got_output, pkt);
/* ist is intentionally NULL: no corrupt-frame check applies to subtitles */
2218 check_decode_result(NULL, got_output, ret);
2220 if (ret < 0 || !*got_output) {
2222 sub2video_flush(ist);
2226 if (ist->fix_sub_duration) {
/* clamp the previous subtitle's duration so it ends when this one starts
 * (end is in milliseconds, hence the 1000/AV_TIME_BASE rescale) */
2228 if (ist->prev_sub.got_output) {
2229 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2230 1000, AV_TIME_BASE);
2231 if (end < ist->prev_sub.subtitle.end_display_time) {
2232 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2233 "Subtitle duration reduced from %d to %d%s\n",
2234 ist->prev_sub.subtitle.end_display_time, end,
2235 end <= 0 ? ", dropping it" : "");
2236 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and buffered subtitle: output lags one subtitle behind */
2239 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2240 FFSWAP(int, ret, ist->prev_sub.ret);
2241 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2249 sub2video_update(ist, &subtitle);
2251 if (!subtitle.num_rects)
2254 ist->frames_decoded++;
/* encode into every output stream that takes subtitles from this input */
2256 for (i = 0; i < nb_output_streams; i++) {
2257 OutputStream *ost = output_streams[i];
2259 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2260 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2263 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2267 avsubtitle_free(&subtitle);
/*
 * Signal end-of-stream to every filtergraph input fed by this input stream
 * by pushing a NULL frame into each buffer source.
 */
2271 static int send_filter_eof(InputStream *ist)
2274 for (i = 0; i < ist->nb_filters; i++) {
/* a NULL frame marks EOF on a buffersrc */
2275 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2282 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one packet of an input stream: decode it (audio/video/subtitle)
 * or, for pure stream copy, just update the timestamp bookkeeping and
 * forward it to matching outputs.  pkt == NULL means EOF and is used to
 * flush the decoder; no_eof suppresses sending EOF to the filters (used
 * when looping the input).  Maintains ist->pts/dts plus the next_pts /
 * next_dts predictions (all in AV_TIME_BASE units). */
2283 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* First packet for this stream: derive an initial DTS.  With B-frames the
 * decoder introduces a delay, so start negative by has_b_frames frame
 * durations when an average frame rate is known. */
2289 if (!ist->saw_first_ts) {
2290 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2292 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2293 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2294 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2296 ist->saw_first_ts = 1;
2299 if (ist->next_dts == AV_NOPTS_VALUE)
2300 ist->next_dts = ist->dts;
2301 if (ist->next_pts == AV_NOPTS_VALUE)
2302 ist->next_pts = ist->pts;
/* avpkt is a local working copy that the decode loop consumes; at EOF it
 * stays empty so the decoders are flushed. */
2306 av_init_packet(&avpkt);
2314 if (pkt->dts != AV_NOPTS_VALUE) {
2315 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2316 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2317 ist->next_pts = ist->pts = ist->dts;
2320 // while we have more to decode or while the decoder did output something on EOF
2321 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2325 ist->pts = ist->next_pts;
2326 ist->dts = ist->next_dts;
/* A decoder that consumed only part of the packet without declaring
 * AV_CODEC_CAP_SUBFRAMES indicates multiple frames per packet; warn once
 * loudly, then only at verbose level. */
2328 if (avpkt.size && avpkt.size != pkt->size &&
2329 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2330 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2331 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2332 ist->showed_multi_packet_warning = 1;
/* Dispatch to the per-media-type decode helper. */
2335 switch (ist->dec_ctx->codec_type) {
2336 case AVMEDIA_TYPE_AUDIO:
2337 ret = decode_audio    (ist, &avpkt, &got_output);
2339 case AVMEDIA_TYPE_VIDEO:
2340 ret = decode_video    (ist, &avpkt, &got_output);
/* Estimate the decoded frame duration: prefer the container-provided
 * packet duration, otherwise derive it from the codec frame rate and
 * the parser's repeat_pict tick count. */
2341 if (avpkt.duration) {
2342 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2343 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2344 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2345 duration = ((int64_t)AV_TIME_BASE *
2346 ist->dec_ctx->framerate.den * ticks) /
2347 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2351 if(ist->dts != AV_NOPTS_VALUE && duration) {
2352 ist->next_dts += duration;
2354 ist->next_dts = AV_NOPTS_VALUE;
2357 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2359 case AVMEDIA_TYPE_SUBTITLE:
2360 ret = transcode_subtitles(ist, &avpkt, &got_output);
2367 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2368 ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first decode call in the loop may use the packet PTS. */
2375 avpkt.pts= AV_NOPTS_VALUE;
2377 // touch data and size only if not EOF
2379 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2387 if (got_output && !pkt)
2391 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2392 /* except when looping we need to flush but not to send an EOF */
2393 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2394 int ret = send_filter_eof(ist);
2396 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2401 /* handle stream copy */
2402 if (!ist->decoding_needed) {
2403 ist->dts = ist->next_dts;
/* Predict the next DTS from codec parameters since no decoding happens. */
2404 switch (ist->dec_ctx->codec_type) {
2405 case AVMEDIA_TYPE_AUDIO:
2406 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2407 ist->dec_ctx->sample_rate;
2409 case AVMEDIA_TYPE_VIDEO:
/* User forced an input frame rate (-r before -i): quantize the running
 * DTS to that frame grid and step by exactly one frame. */
2410 if (ist->framerate.num) {
2411 // TODO: Remove work-around for c99-to-c89 issue 7
2412 AVRational time_base_q = AV_TIME_BASE_Q;
2413 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2414 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2415 } else if (pkt->duration) {
2416 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2417 } else if(ist->dec_ctx->framerate.num != 0) {
2418 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2419 ist->next_dts += ((int64_t)AV_TIME_BASE *
2420 ist->dec_ctx->framerate.den * ticks) /
2421 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2425 ist->pts = ist->dts;
2426 ist->next_pts = ist->next_dts;
/* Forward the packet to every stream-copy output mapped to this input. */
2428 for (i = 0; pkt && i < nb_output_streams; i++) {
2429 OutputStream *ost = output_streams[i];
2431 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2434 do_streamcopy(ist, ost, pkt);
/* Generate an SDP description covering all RTP output files and emit it
 * either to stdout or, when -sdp_file was given, to that file.  Collects
 * the AVFormatContexts of every "rtp" muxer output, then lets
 * av_sdp_create() build the session description. */
2440 static void print_sdp(void)
2445 AVIOContext *sdp_pb;
2446 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* j counts only the RTP outputs; non-RTP outputs are skipped. */
2450 for (i = 0, j = 0; i < nb_output_files; i++) {
2451 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2452 avc[j] = output_files[i]->ctx;
2460 av_sdp_create(avc, j, sdp, sizeof(sdp));
2462 if (!sdp_filename) {
2463 printf("SDP:\n%s\n", sdp);
2466 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2467 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2469 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2470 avio_closep(&sdp_pb);
/* The filename is consumed here so the SDP is only written once. */
2471 av_freep(&sdp_filename);
/* Look up the compiled-in hwaccel entry whose pixel format matches
 * pix_fmt; the hwaccels[] table is terminated by a NULL name entry. */
2479 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2482 for (i = 0; hwaccels[i].name; i++)
2483 if (hwaccels[i].pix_fmt == pix_fmt)
2484 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick one.  For hardware formats, try to initialize the
 * matching hwaccel; skip formats whose hwaccel does not match the user's
 * selection or is already active with a different id.  If a specifically
 * requested hwaccel fails to initialize, that is fatal. */
2488 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2490 InputStream *ist = s->opaque;
2491 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2494 for (p = pix_fmts; *p != -1; p++) {
2495 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2496 const HWAccel *hwaccel;
/* Software formats are acceptable as-is; stop at the first one. */
2498 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2501 hwaccel = get_hwaccel(*p);
2503 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2504 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2507 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2509 if (ist->hwaccel_id == hwaccel->id) {
2510 av_log(NULL, AV_LOG_FATAL,
2511 "%s hwaccel requested for input stream #%d:%d, "
2512 "but cannot be initialized.\n", hwaccel->name,
2513 ist->file_index, ist->st->index);
2514 return AV_PIX_FMT_NONE;
2518 ist->active_hwaccel_id = hwaccel->id;
2519 ist->hwaccel_pix_fmt   = *p;
2526 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2528 InputStream *ist = s->opaque;
2530 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2531 return ist->hwaccel_get_buffer(s, frame, flags);
2533 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream, if decoding is needed: install
 * the get_format/get_buffer2 hwaccel callbacks, request refcounted
 * frames, apply per-codec option tweaks, then avcodec_open2().  On
 * failure a human-readable message is written into error/error_len.
 * Also resets the stream's next_pts/next_dts predictions. */
2536 static int init_input_stream(int ist_index, char *error, int error_len)
2539 InputStream *ist = input_streams[ist_index];
2541 if (ist->decoding_needed) {
2542 AVCodec *codec = ist->dec;
2544 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2545 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2546 return AVERROR(EINVAL);
/* opaque lets the callbacks recover the InputStream from the codec ctx. */
2549 ist->dec_ctx->opaque                = ist;
2550 ist->dec_ctx->get_format            = get_format;
2551 ist->dec_ctx->get_buffer2           = get_buffer;
2552 ist->dec_ctx->thread_safe_callbacks = 1;
2554 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output need end display times computed. */
2555 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2556 (ist->decoding_needed & DECODING_FOR_OST)) {
2557 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2558 if (ist->decoding_needed & DECODING_FOR_FILTER)
2559 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2562 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
/* Default to automatic thread count unless the user chose one. */
2564 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2565 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2566 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2567 if (ret == AVERROR_EXPERIMENTAL)
2568 abort_codec_experimental(codec, 0);
2570 snprintf(error, error_len,
2571 "Error while opening decoder for input stream "
2573 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left over were not consumed -> user error. */
2576 assert_avoptions(ist->decoder_opts);
2579 ist->next_pts = AV_NOPTS_VALUE;
2580 ist->next_dts = AV_NOPTS_VALUE;
/* Map an output stream back to the input stream it is fed from, via
 * source_index.  source_index is negative for streams produced purely by
 * a complex filtergraph (no single source input stream). */
2585 static InputStream *get_input_stream(OutputStream *ost)
2587 if (ost->source_index >= 0)
2588 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns negative, zero, or
 * positive as *a is less than, equal to, or greater than *b.  Written
 * as a difference of comparisons (the FFDIFFSIGN idiom) to avoid the
 * overflow a plain subtraction of int64_t values could cause. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
/* Finish setting up one output stream.  For encoded streams: copy the
 * subtitle header from the decoder (if any), apply default encoder
 * options (auto threads, default audio bitrate), propagate hw frame
 * contexts from the filtergraph, open the encoder, and mirror the
 * encoder context plus its coded side data into the muxer's AVStream.
 * On failure, writes a message into error/error_len. */
2597 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2601 if (ost->encoding_needed) {
2602 AVCodec      *codec = ost->enc;
2603 AVCodecContext *dec = NULL;
2606 if ((ist = get_input_stream(ost)))
2608 if (dec && dec->subtitle_header) {
2609 /* ASS code assumes this buffer is null terminated so add extra byte. */
2610 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2611 if (!ost->enc_ctx->subtitle_header)
2612 return AVERROR(ENOMEM);
2613 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2614 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic threading unless the user chose a thread count. */
2616 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2617 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio with no user-specified bitrate defaults to 128 kb/s. */
2618 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2620 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2621 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2622 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hand the filtergraph's hw frames context to the encoder, if present. */
2624 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2625 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2626 if (!ost->enc_ctx->hw_frames_ctx)
2627 return AVERROR(ENOMEM);
2630 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2631 if (ret == AVERROR_EXPERIMENTAL)
2632 abort_codec_experimental(codec, 1);
2633 snprintf(error, error_len,
2634 "Error while opening encoder for output stream #%d:%d - "
2635 "maybe incorrect parameters such as bit_rate, rate, width or height",
2636 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders require the buffersink to emit frames
 * of exactly frame_size samples. */
2639 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2640 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2641 av_buffersink_set_frame_size(ost->filter->filter,
2642 ost->enc_ctx->frame_size);
2643 assert_avoptions(ost->encoder_opts);
2644 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2645 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2646 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the opened encoder context into the muxer's (deprecated)
 * AVStream.codec so the muxer sees the final parameters. */
2648 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2650 av_log(NULL, AV_LOG_FATAL,
2651 "Error initializing the output stream codec context.\n");
/* Copy global side data produced by the encoder to the stream. */
2655 if (ost->enc_ctx->nb_coded_side_data) {
2658 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2659 sizeof(*ost->st->side_data));
2660 if (!ost->st->side_data)
2661 return AVERROR(ENOMEM);
2663 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2664 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2665 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2667 sd_dst->data = av_malloc(sd_src->size);
2669 return AVERROR(ENOMEM);
2670 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2671 sd_dst->size = sd_src->size;
2672 sd_dst->type = sd_src->type;
2673 ost->st->nb_side_data++;
2677 // copy timebase while removing common factors
2678 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2679 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path: apply user codec options directly to the stream's
 * codec context. */
2681 ret = av_opt_set_dict(ost->st->codec, &ost->encoder_opts);
2683 av_log(NULL, AV_LOG_FATAL,
2684 "Error setting up codec context options.\n");
2687 // copy timebase while removing common factors
2688 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse a -force_key_frames specification (comma-separated times, where
 * an entry "chapters[+offset]" expands to every chapter start of the
 * output file) into a sorted array of PTS values expressed in
 * avctx->time_base.  The result is stored in ost->forced_kf_pts /
 * ost->forced_kf_count; kf is consumed destructively (commas split). */
2694 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2695 AVCodecContext *avctx)
2698 int n = 1, i, size, index = 0;
/* First pass: count the comma-separated entries to size the array. */
2701 for (p = kf; *p; p++)
2705 pts = av_malloc_array(size, sizeof(*pts));
2707 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2712 for (i = 0; i < n; i++) {
2713 char *next = strchr(p, ',');
/* NOTE(review): memcmp compares a full 8 bytes, so a spec entry shorter
 * than "chapters" could be over-read -- strncmp would stop at the NUL;
 * confirm the possible inputs. */
2718 if (!memcmp(p, "chapters", 8)) {
2720 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (minus the spec slot). */
2723 if (avf->nb_chapters > INT_MAX - size ||
2724 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2726 av_log(NULL, AV_LOG_FATAL,
2727 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters" shifts every chapter start. */
2730 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2731 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2733 for (j = 0; j < avf->nb_chapters; j++) {
2734 AVChapter *c = avf->chapters[j];
2735 av_assert1(index < size);
2736 pts[index++] = av_rescale_q(c->start, c->time_base,
2737 avctx->time_base) + t;
2742 t = parse_time_or_die("force_key_frames", p, 1);
2743 av_assert1(index < size);
2744 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2751 av_assert0(index == size);
/* Keep the forced keyframe times sorted for the encoder-side lookup. */
2752 qsort(pts, size, sizeof(*pts), compare_int64);
2753 ost->forced_kf_count = size;
2754 ost->forced_kf_pts   = pts;
/* Warn (once per stream index) when a packet references an input stream
 * that appeared after stream detection; nb_streams_warn tracks the
 * highest stream index already reported. */
2757 static void report_new_stream(int input_index, AVPacket *pkt)
2759 InputFile *file = input_files[input_index];
2760 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or a later one) -> stay silent. */
2762 if (pkt->stream_index < file->nb_streams_warn)
2764 av_log(file->ctx, AV_LOG_WARNING,
2765 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2766 av_get_media_type_string(st->codec->codec_type),
2767 input_index, pkt->stream_index,
2768 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2769 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on an output stream to identify the
 * encoder used (LIBAVCODEC_IDENT + encoder name).  Skipped when the user
 * already supplied an encoder tag; shortened to "Lavc" when bitexact
 * output was requested via format or codec flags. */
2772 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2774 AVDictionaryEntry *e;
2776 uint8_t *encoder_string;
2777 int encoder_string_len;
2778 int format_flags = 0;
2779 int codec_flags = 0;
/* Respect an explicit user-provided encoder tag. */
2781 if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Evaluate the -fflags / -flags option strings to detect +bitexact. */
2784 e = av_dict_get(of->opts, "fflags", NULL, 0);
2786 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2789 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2791 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2793 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2796 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one space separator and the terminating NUL. */
2799 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2800 encoder_string     = av_mallocz(encoder_string_len);
2801 if (!encoder_string)
2804 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2805 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2807 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2808 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string. */
2809 av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2810 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the transcode loop: bind complex-filtergraph
 * outputs to source streams, start framerate emulation clocks, compute
 * per-output-stream parameters (stream copy vs. encode), open all
 * decoders and encoders, write every output file header, and finally
 * dump the stream mapping.  Returns 0 on success or a negative error. */
2813 static int transcode_init(void)
2815 int ret = 0, i, j, k;
2816 AVFormatContext *oc;
2819 char error[1024] = {0};
/* Associate single-input complex filtergraph outputs with the input
 * stream that feeds the graph, so downstream code has a source_index. */
2822 for (i = 0; i < nb_filtergraphs; i++) {
2823 FilterGraph *fg = filtergraphs[i];
2824 for (j = 0; j < fg->nb_outputs; j++) {
2825 OutputFilter *ofilter = fg->outputs[j];
2826 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2828 if (fg->nb_inputs != 1)
2830 for (k = nb_input_streams-1; k >= 0 ; k--)
2831 if (fg->inputs[0]->ist == input_streams[k])
2833 ofilter->ost->source_index = k;
2837 /* init framerate emulation */
2838 for (i = 0; i < nb_input_files; i++) {
2839 InputFile *ifile = input_files[i];
2840 if (ifile->rate_emu)
2841 for (j = 0; j < ifile->nb_streams; j++)
2842 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2845 /* for each output stream, we compute the right encoding parameters */
2846 for (i = 0; i < nb_output_streams; i++) {
2847 AVCodecContext *enc_ctx;
2848 AVCodecContext *dec_ctx = NULL;
2849 ost = output_streams[i];
2850 oc  = output_files[ost->file_index]->ctx;
2851 ist = get_input_stream(ost);
2853 if (ost->attachment_filename)
2856 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2859 dec_ctx = ist->dec_ctx;
2861 ost->st->disposition          = ist->st->disposition;
2862 enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2863 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output file,
 * mark it as the default track. */
2865 for (j=0; j<oc->nb_streams; j++) {
2866 AVStream *st = oc->streams[j];
2867 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2870 if (j == oc->nb_streams)
2871 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2872 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: mirror decoder parameters into the muxer ---- */
2875 if (ost->stream_copy) {
2877 uint64_t extra_size;
2879 av_assert0(ist && !ost->filter);
2881 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2883 if (extra_size > INT_MAX) {
2884 return AVERROR(EINVAL);
2887 /* if stream_copy is selected, no need to decode or encode */
2888 enc_ctx->codec_id   = dec_ctx->codec_id;
2889 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the source codec tag only when the output container either has
 * no tag table or maps the tag/id pair consistently. */
2891 if (!enc_ctx->codec_tag) {
2892 unsigned int codec_tag;
2893 if (!oc->oformat->codec_tag ||
2894 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2895 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2896 enc_ctx->codec_tag = dec_ctx->codec_tag;
2899 enc_ctx->bit_rate       = dec_ctx->bit_rate;
2900 enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2901 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2902 enc_ctx->field_order    = dec_ctx->field_order;
2903 if (dec_ctx->extradata_size) {
2904 enc_ctx->extradata      = av_mallocz(extra_size);
2905 if (!enc_ctx->extradata) {
2906 return AVERROR(ENOMEM);
2908 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2910 enc_ctx->extradata_size= dec_ctx->extradata_size;
2911 enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2913 enc_ctx->time_base = ist->st->time_base;
2915 * Avi is a special case here because it supports variable fps but
2916 * having the fps and timebase differe significantly adds quite some
2919 if(!strcmp(oc->oformat->name, "avi")) {
2920 if ( copy_tb<0 && ist->st->r_frame_rate.num
2921 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2922 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2923 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2924 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2926 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2927 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2928 enc_ctx->ticks_per_frame = 2;
2929 } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2930 && av_q2d(ist->st->time_base) < 1.0/500
2932 enc_ctx->time_base = dec_ctx->time_base;
2933 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2934 enc_ctx->time_base.den *= 2;
2935 enc_ctx->ticks_per_frame = 2;
2937 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2938 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2939 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2940 && strcmp(oc->oformat->name, "f4v")
2942 if(   copy_tb<0 && dec_ctx->time_base.den
2943 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2944 && av_q2d(ist->st->time_base) < 1.0/500
2946 enc_ctx->time_base = dec_ctx->time_base;
2947 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the decoder timebase when it looks like a
 * plausible frame duration (num/den within ~1/121..1). */
2950 if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2951 && dec_ctx->time_base.num < dec_ctx->time_base.den
2952 && dec_ctx->time_base.num > 0
2953 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2954 enc_ctx->time_base = dec_ctx->time_base;
2957 if (!ost->frame_rate.num)
2958 ost->frame_rate = ist->framerate;
2959 if(ost->frame_rate.num)
2960 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2962 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2963 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream-level side data, skipping the display matrix when the
 * user overrode rotation. */
2965 if (ist->st->nb_side_data) {
2966 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2967 sizeof(*ist->st->side_data));
2968 if (!ost->st->side_data)
2969 return AVERROR(ENOMEM);
2971 ost->st->nb_side_data = 0;
2972 for (j = 0; j < ist->st->nb_side_data; j++) {
2973 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2974 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2976 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2979 sd_dst->data = av_malloc(sd_src->size);
2981 return AVERROR(ENOMEM);
2982 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2983 sd_dst->size = sd_src->size;
2984 sd_dst->type = sd_src->type;
2985 ost->st->nb_side_data++;
2989 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type copying of the remaining decoder parameters. */
2991 switch (enc_ctx->codec_type) {
2992 case AVMEDIA_TYPE_AUDIO:
2993 if (audio_volume != 256) {
2994 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2997 enc_ctx->channel_layout     = dec_ctx->channel_layout;
2998 enc_ctx->sample_rate        = dec_ctx->sample_rate;
2999 enc_ctx->channels           = dec_ctx->channels;
3000 enc_ctx->frame_size         = dec_ctx->frame_size;
3001 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3002 enc_ctx->block_align        = dec_ctx->block_align;
3003 enc_ctx->initial_padding    = dec_ctx->delay;
3004 enc_ctx->profile            = dec_ctx->profile;
3005 #if FF_API_AUDIOENC_DELAY
3006 enc_ctx->delay              = dec_ctx->delay;
/* Bogus/meaningless block_align values for MP3 and AC-3 are cleared. */
3008 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3009 enc_ctx->block_align= 0;
3010 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3011 enc_ctx->block_align= 0;
3013 case AVMEDIA_TYPE_VIDEO:
3014 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
3015 enc_ctx->colorspace         = dec_ctx->colorspace;
3016 enc_ctx->color_range        = dec_ctx->color_range;
3017 enc_ctx->color_primaries    = dec_ctx->color_primaries;
3018 enc_ctx->color_trc          = dec_ctx->color_trc;
3019 enc_ctx->width              = dec_ctx->width;
3020 enc_ctx->height             = dec_ctx->height;
3021 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
3022 enc_ctx->profile            = dec_ctx->profile;
3023 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3025 av_mul_q(ost->frame_aspect_ratio,
3026 (AVRational){ enc_ctx->height, enc_ctx->width });
3027 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3028 "with stream copy may produce invalid files\n");
3030 else if (ist->st->sample_aspect_ratio.num)
3031 sar = ist->st->sample_aspect_ratio;
3033 sar = dec_ctx->sample_aspect_ratio;
3034 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3035 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3036 ost->st->r_frame_rate = ist->st->r_frame_rate;
3038 case AVMEDIA_TYPE_SUBTITLE:
3039 enc_ctx->width  = dec_ctx->width;
3040 enc_ctx->height = dec_ctx->height;
3042 case AVMEDIA_TYPE_UNKNOWN:
3043 case AVMEDIA_TYPE_DATA:
3044 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encode path: select the encoder and derive its parameters ---- */
3051 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3053 /* should only happen when a default codec is not present. */
3054 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3055 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3056 ret = AVERROR(EINVAL);
3060 set_encoder_id(output_files[ost->file_index], ost);
/* Hardware transcode fast paths (QSV / CUVID) bypass the generic setup. */
3063 if (qsv_transcode_init(ost))
3068 if (cuvid_transcode_init(ost))
/* Audio/video encoding always goes through a (possibly trivial) simple
 * filtergraph connecting the input stream to the encoder. */
3073 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3074 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3076 fg = init_simple_filtergraph(ist, ost);
3077 if (configure_filtergraph(fg)) {
3078 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Choose an output frame rate: filter output, then -r, then the input's
 * real frame rate, then a 25 fps fallback. */
3083 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3084 if (!ost->frame_rate.num)
3085 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3086 if (ist && !ost->frame_rate.num)
3087 ost->frame_rate = ist->framerate;
3088 if (ist && !ost->frame_rate.num)
3089 ost->frame_rate = ist->st->r_frame_rate;
3090 if (ist && !ost->frame_rate.num) {
3091 ost->frame_rate = (AVRational){25, 1};
3092 av_log(NULL, AV_LOG_WARNING,
3094 "about the input framerate is available. Falling "
3095 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3096 "if you want a different framerate.\n",
3097 ost->file_index, ost->index);
3099 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3100 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3101 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3102 ost->frame_rate = ost->enc->supported_framerates[idx];
3104 // reduce frame rate for mpeg4 to be within the spec limits
3105 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3106 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3107 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull the negotiated format parameters out of the filtergraph sink. */
3111 switch (enc_ctx->codec_type) {
3112 case AVMEDIA_TYPE_AUDIO:
3113 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3114 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3115 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3116 enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3117 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3119 case AVMEDIA_TYPE_VIDEO:
3120 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3121 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3122 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3123 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3124 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3125 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3126 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE units; convert them
 * to the encoder timebase now that it is known. */
3128 for (j = 0; j < ost->forced_kf_count; j++)
3129 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3131 enc_ctx->time_base);
3133 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3134 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3135 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3136 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3137 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3138 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3139 if (!strncmp(ost->enc->name, "libx264", 7) &&
3140 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3141 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3142 av_log(NULL, AV_LOG_WARNING,
3143 "No pixel format specified, %s for H.264 encoding chosen.\n"
3144 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3145 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3146 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3147 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3148 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3149 av_log(NULL, AV_LOG_WARNING,
3150 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3151 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3152 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3153 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3155 ost->st->avg_frame_rate = ost->frame_rate;
3158 enc_ctx->width   != dec_ctx->width  ||
3159 enc_ctx->height  != dec_ctx->height ||
3160 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3161 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source", or a static comma-separated time list. */
3164 if (ost->forced_keyframes) {
3165 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3166 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3167 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3169 av_log(NULL, AV_LOG_ERROR,
3170 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3173 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3174 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3175 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3176 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3178 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3179 // parse it only for static kf timings
3180 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3181 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3185 case AVMEDIA_TYPE_SUBTITLE:
3186 enc_ctx->time_base = (AVRational){1, 1000};
3187 if (!enc_ctx->width) {
3188 enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3189 enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3192 case AVMEDIA_TYPE_DATA:
/* Apply a user-specified -disposition string by evaluating it against a
 * local flags option table. */
3200 if (ost->disposition) {
3201 static const AVOption opts[] = {
3202 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3203 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3204 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3205 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3206 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3207 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3208 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3209 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3210 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3211 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3212 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3213 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3214 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3215 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3218 static const AVClass class = {
3220 .item_name  = av_default_item_name,
3222 .version    = LIBAVUTIL_VERSION_INT,
3224 const AVClass *pclass = &class;
3226 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3232 /* init input streams */
3233 for (i = 0; i < nb_input_streams; i++)
3234 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3235 for (i = 0; i < nb_output_streams; i++) {
3236 ost = output_streams[i];
3237 avcodec_close(ost->enc_ctx);
3242 /* open each encoder */
3243 for (i = 0; i < nb_output_streams; i++) {
3244 ret = init_output_stream(output_streams[i], error, sizeof(error));
3249 /* discard unused programs */
3250 for (i = 0; i < nb_input_files; i++) {
3251 InputFile *ifile = input_files[i];
3252 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3253 AVProgram *p = ifile->ctx->programs[j];
3254 int discard  = AVDISCARD_ALL;
3256 for (k = 0; k < p->nb_stream_indexes; k++)
3257 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3258 discard = AVDISCARD_DEFAULT;
3261 p->discard = discard;
3265 /* open files and write file headers */
3266 for (i = 0; i < nb_output_files; i++) {
3267 oc = output_files[i]->ctx;
3268 oc->interrupt_callback = int_cb;
3269 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3270 snprintf(error, sizeof(error),
3271 "Could not write header for output file #%d "
3272 "(incorrect codec parameters ?): %s",
3273 i, av_err2str(ret));
3274 ret = AVERROR(EINVAL);
3277 //         assert_avoptions(output_files[i]->opts);
3278 if (strcmp(oc->oformat->name, "rtp")) {
3284 /* dump the file output parameters - cannot be done before in case
3286 for (i = 0; i < nb_output_files; i++) {
3287 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3290 /* dump the stream mapping */
3291 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3292 for (i = 0; i < nb_input_streams; i++) {
3293 ist = input_streams[i];
3295 for (j = 0; j < ist->nb_filters; j++) {
3296 if (ist->filters[j]->graph->graph_desc) {
3297 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3298 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3299 ist->filters[j]->name);
3300 if (nb_filtergraphs > 1)
3301 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3302 av_log(NULL, AV_LOG_INFO, "\n");
3307 for (i = 0; i < nb_output_streams; i++) {
3308 ost = output_streams[i];
3310 if (ost->attachment_filename) {
3311 /* an attached file */
3312 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3313 ost->attachment_filename, ost->file_index, ost->index);
3317 if (ost->filter && ost->filter->graph->graph_desc) {
3318 /* output from a complex graph */
3319 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3320 if (nb_filtergraphs > 1)
3321 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3323 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3324 ost->index, ost->enc ? ost->enc->name : "?");
3328 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3329 input_streams[ost->source_index]->file_index,
3330 input_streams[ost->source_index]->st->index,
3333 if (ost->sync_ist != input_streams[ost->source_index])
3334 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3335 ost->sync_ist->file_index,
3336 ost->sync_ist->st->index);
3337 if (ost->stream_copy)
3338 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Show "native" when the codec name equals the descriptor name. */
3340 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3341 const AVCodec *out_codec   = ost->enc;
3342 const char *decoder_name   = "?";
3343 const char *in_codec_name  = "?";
3344 const char *encoder_name   = "?";
3345 const char *out_codec_name = "?";
3346 const AVCodecDescriptor *desc;
3349 decoder_name  = in_codec->name;
3350 desc = avcodec_descriptor_get(in_codec->id);
3352 in_codec_name = desc->name;
3353 if (!strcmp(decoder_name, in_codec_name))
3354 decoder_name  = "native";
3358 encoder_name   = out_codec->name;
3359 desc = avcodec_descriptor_get(out_codec->id);
3361 out_codec_name = desc->name;
3362 if (!strcmp(encoder_name, out_codec_name))
3363 encoder_name   = "native";
3366 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3367 in_codec_name, decoder_name,
3368 out_codec_name, encoder_name);
3370 av_log(NULL, AV_LOG_INFO, "\n");
3374 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3378 if (sdp_filename || want_sdp) {
3382 transcode_init_done = 1;
3387 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3388 static int need_output(void)
/* Scan every output stream; a stream stops needing output once it is
 * finished, its file reached the -fs size limit, or it hit -frames. */
3392 for (i = 0; i < nb_output_streams; i++) {
3393 OutputStream *ost = output_streams[i];
3394 OutputFile *of = output_files[ost->file_index];
3395 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Finished, or the muxer output already reached limit_filesize. */
3397 if (ost->finished ||
3398 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached for this stream: close every stream belonging
 * to the same output file. */
3400 if (ost->frame_number >= ost->max_frames) {
3402 for (j = 0; j < of->ctx->nb_streams; j++)
3403 close_output_stream(output_streams[of->ost_index + j]);
3414 * Select the output stream to process.
3416 * @return selected output stream, or NULL if none available
3418 static OutputStream *choose_output(void)
/* Pick the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base) so muxing stays well interleaved. */
3421 int64_t opts_min = INT64_MAX;
3422 OutputStream *ost_min = NULL;
3424 for (i = 0; i < nb_output_streams; i++) {
3425 OutputStream *ost = output_streams[i];
/* Streams that have not produced any DTS yet sort first (INT64_MIN)
 * so they are fed before streams that are already ahead. */
3426 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3427 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3429 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3430 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3432 if (!ost->finished && opts < opts_min) {
/* A temporarily unavailable stream (input returned EAGAIN) selects
 * NULL so the caller retries later instead of blocking on it. */
3434 ost_min = ost->unavailable ? NULL : ost;
3440 static void set_tty_echo(int on)
/* Toggle terminal echo on stdin (fd 0), used around interactive command
 * entry; if stdin is not a tty, tcgetattr() fails and nothing happens. */
3444 if (tcgetattr(0, &tty) == 0) {
3445 if (on) tty.c_lflag |= ECHO;
3446 else tty.c_lflag &= ~ECHO;
3447 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms) and act on single-key commands:
 * verbosity, QP histogram, packet dumping, filter commands, debug flags.
 * Returns AVERROR_EXIT when the user asked to quit or a signal arrived. */
3452 static int check_keyboard_interaction(int64_t cur_time)
3455 static int64_t last_time;
3456 if (received_nb_signals)
3457 return AVERROR_EXIT;
3458 /* read_key() returns 0 on EOF */
/* Rate-limit key polling to once per 100ms; never poll in daemon mode. */
3459 if(cur_time - last_time >= 100000 && !run_as_daemon){
3461 last_time = cur_time;
3465 return AVERROR_EXIT;
3466 if (key == '+') av_log_set_level(av_log_get_level()+10);
3467 if (key == '-') av_log_set_level(av_log_get_level()-10);
3468 if (key == 's') qp_hist ^= 1;
3471 do_hex_dump = do_pkt_dump = 0;
3472 } else if(do_pkt_dump){
3476 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' to all of them
 * (or queues it for a later timestamp). Input format:
 *   <target>|all <time>|-1 <command>[ <argument>] */
3478 if (key == 'c' || key == 'C'){
3479 char buf[4096], target[64], command[256], arg[256] = {0};
3482 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one line from the keyboard without echo handling from stdio. */
3485 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3490 fprintf(stderr, "\n");
/* At least target, time and command must parse; arg is optional. */
3492 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3493 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3494 target, time, command, arg);
3495 for (i = 0; i < nb_filtergraphs; i++) {
3496 FilterGraph *fg = filtergraphs[i];
/* buf is reused here as the command reply buffer. */
3499 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3500 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3501 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3502 } else if (key == 'c') {
3503 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3504 ret = AVERROR_PATCHWELCOME;
3506 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3508 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3513 av_log(NULL, AV_LOG_ERROR,
3514 "Parse error, at least 3 arguments were expected, "
3515 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles through decoder debug modes; 'd' prompts for an explicit
 * numeric debug value. */
3518 if (key == 'd' || key == 'D'){
3521 debug = input_streams[0]->st->codec->debug<<1;
3522 if(!debug) debug = 1;
3523 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3530 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3535 fprintf(stderr, "\n");
3536 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3537 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3539 for(i=0;i<nb_input_streams;i++) {
3540 input_streams[i]->st->codec->debug = debug;
3542 for(i=0;i<nb_output_streams;i++) {
3543 OutputStream *ost = output_streams[i];
3544 ost->enc_ctx->debug = debug;
3546 if(debug) av_log_set_level(AV_LOG_DEBUG);
3547 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or any unrecognized key, presumably — elided here) prints help. */
3550 fprintf(stderr, "key function\n"
3551 "? show this help\n"
3552 "+ increase verbosity\n"
3553 "- decrease verbosity\n"
3554 "c Send command to first matching filter supporting it\n"
3555 "C Send/Que command to all matching filters\n"
3556 "D cycle through available debug modes\n"
3557 "h dump packets/hex press to cycle through the 3 states\n"
3559 "s Show QP histogram\n"
/* Per-input-file demuxer thread (used when there are multiple inputs):
 * reads packets with av_read_frame() and forwards them to the main
 * thread through the file's thread message queue. */
3566 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable/live inputs so a full
 * queue does not stall the demuxer indefinitely. */
3569 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3574 ret = av_read_frame(f->ctx, &pkt);
3576 if (ret == AVERROR(EAGAIN)) {
/* Propagate the read error/EOF to the receiving side of the queue. */
3581 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3584 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: retry (blocking — elided here) and
 * warn the user to raise -thread_queue_size. */
3585 if (flags && ret == AVERROR(EAGAIN)) {
3587 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3588 av_log(f->ctx, AV_LOG_WARNING,
3589 "Thread message queue blocking; consider raising the "
3590 "thread_queue_size option (current value: %d)\n",
3591 f->thread_queue_size);
/* Send failed: drop the packet and tell the receiver about the error
 * (AVERROR_EOF is the normal shutdown path, so it is not logged). */
3594 if (ret != AVERROR_EOF)
3595 av_log(f->ctx, AV_LOG_ERROR,
3596 "Unable to send packet to main thread: %s\n",
3598 av_packet_unref(&pkt);
3599 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all demuxer input threads, draining and unreffing any
 * packets still queued, then free each thread message queue. */
3607 static void free_input_threads(void)
3611 for (i = 0; i < nb_input_files; i++) {
3612 InputFile *f = input_files[i];
/* Skip files that never had a thread/queue (e.g. single-input runs). */
3615 if (!f || !f->in_thread_queue)
/* Signal the sender side so the thread's next send fails and it exits. */
3617 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3618 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3619 av_packet_unref(&pkt);
3621 pthread_join(f->thread, NULL);
3623 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread plus message queue per input file.
 * With a single input file, threading is skipped entirely.
 * Returns 0 on success or a negative AVERROR code. */
3627 static int init_input_threads(void)
3631 if (nb_input_files == 1)
3634 for (i = 0; i < nb_input_files; i++) {
3635 InputFile *f = input_files[i];
/* Non-seekable inputs (live sources) — except lavfi — get non-blocking
 * queue sends so the demuxer cannot be stalled by a full queue. */
3637 if (f->ctx->pb ? !f->ctx->pb->seekable :
3638 strcmp(f->ctx->iformat->name, "lavfi"))
3639 f->non_blocking = 1;
3640 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3641 f->thread_queue_size, sizeof(AVPacket));
3645 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3646 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3647 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, hence AVERROR(ret). */
3648 return AVERROR(ret);
/* Multi-threaded variant: receive the next packet for this input file
 * from its demuxer thread's message queue (non-blocking when the
 * condition elided here holds, presumably f->non_blocking). */
3654 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3656 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3658 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. Honors -re style rate
 * limiting (the loop below throttles against wall-clock time) and
 * dispatches to the threaded reader when several inputs are open. */
3662 static int get_input_packet(InputFile *f, AVPacket *pkt)
3666 for (i = 0; i < f->nb_streams; i++) {
3667 InputStream *ist = input_streams[f->ist_index + i];
/* Stream DTS converted to microseconds vs. elapsed wall-clock time;
 * if the stream is ahead of real time, report EAGAIN to slow down. */
3668 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3669 int64_t now = av_gettime_relative() - ist->start;
3671 return AVERROR(EAGAIN);
3676 if (nb_input_files > 1)
3677 return get_input_packet_mt(f, pkt);
3679 return av_read_frame(f->ctx, pkt);
/* Return non-zero if any output stream is currently marked unavailable
 * (i.e. some input reported EAGAIN during the last step). */
3682 static int got_eagain(void)
3685 for (i = 0; i < nb_output_streams; i++)
3686 if (output_streams[i]->unavailable)
/* Clear the per-file eagain flags and per-stream unavailable flags so
 * the next transcoding step retries every input and output. */
3691 static void reset_eagain(void)
3694 for (i = 0; i < nb_input_files; i++)
3695 input_files[i]->eagain = 0;
3696 for (i = 0; i < nb_output_streams; i++)
3697 output_streams[i]->unavailable = 0;
3700 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) against *duration (in time_base) via
 * av_compare_ts(); when tmp is larger, *duration is updated and the new
 * time base is returned (update paths elided here). */
3701 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3702 AVRational time_base)
3708 return tmp_time_base;
3711 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3714 return tmp_time_base;
/* Seek an input file back to its start for -stream_loop: flush all
 * decoders, then accumulate the file's total duration (used later to
 * offset timestamps of the next iteration into ifile->duration).
 * Returns 0 or a negative AVERROR from av_seek_frame(). */
3720 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3723 AVCodecContext *avctx;
3724 int i, ret, has_audio = 0;
3725 int64_t duration = 0;
3727 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: drain decoders and detect whether audio is present. */
3731 for (i = 0; i < ifile->nb_streams; i++) {
3732 ist = input_streams[ifile->ist_index + i];
3733 avctx = ist->dec_ctx;
3736 if (ist->decoding_needed) {
/* NULL packet with eof=1 flushes the decoder before the rewind. */
3737 process_input_packet(ist, NULL, 1);
3738 avcodec_flush_buffers(avctx);
3741 /* duration is the length of the last frame in a stream
3742 * when audio stream is present we don't care about
3743 * last video frame length because it's not defined exactly */
3744 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's duration including its last frame. */
3748 for (i = 0; i < ifile->nb_streams; i++) {
3749 ist = input_streams[ifile->ist_index + i];
3750 avctx = ist->dec_ctx;
/* Audio: last-frame length derives from the decoded sample count. */
3753 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3754 AVRational sample_rate = {1, avctx->sample_rate};
3756 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: use the configured or average frame rate for one frame;
 * fall back to a single time-base tick when neither is known. */
3760 if (ist->framerate.num) {
3761 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3762 } else if (ist->st->avg_frame_rate.num) {
3763 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3764 } else duration = 1;
3766 if (!ifile->duration)
3767 ifile->time_base = ist->st->time_base;
3768 /* the total duration of the stream, max_pts - min_pts is
3769 * the duration of the stream without the last frame */
3770 duration += ist->max_pts - ist->min_pts;
3771 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement remaining iterations (body elided). */
3775 if (ifile->loop > 0)
3783 * - 0 -- one packet was read and processed
3784 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3785 * this function should be called again
3786 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (start-time/offset correction, wrap correction, ts_scale, -stream_loop
 * duration offset, discontinuity handling) and hand it to
 * process_input_packet(). */
3788 static int process_input(int file_index)
3790 InputFile *ifile = input_files[file_index];
3791 AVFormatContext *is;
3799 ret = get_input_packet(ifile, &pkt);
3801 if (ret == AVERROR(EAGAIN)) {
/* Read failed but -stream_loop is active: rewind and retry once. */
3805 if (ret < 0 && ifile->loop) {
3806 if ((ret = seek_to_start(ifile, is)) < 0)
3808 ret = get_input_packet(ifile, &pkt);
3811 if (ret != AVERROR_EOF) {
3812 print_error(is->filename, ret);
/* EOF on this file: flush every decoder that needs it and finish the
 * outputs fed directly (stream copy / subtitles) by its streams. */
3817 for (i = 0; i < ifile->nb_streams; i++) {
3818 ist = input_streams[ifile->ist_index + i];
3819 if (ist->decoding_needed) {
3820 ret = process_input_packet(ist, NULL, 0);
3825 /* mark all outputs that don't go through lavfi as finished */
3826 for (j = 0; j < nb_output_streams; j++) {
3827 OutputStream *ost = output_streams[j];
3829 if (ost->source_index == ifile->ist_index + i &&
3830 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3831 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the main loop re-evaluates which
 * files/streams still have work, instead of aborting outright. */
3835 ifile->eof_reached = 1;
3836 return AVERROR(EAGAIN);
3842 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3843 is->streams[pkt.stream_index]);
3845 /* the following test is needed in case new streams appear
3846 dynamically in stream : we ignore them */
3847 if (pkt.stream_index >= ifile->nb_streams) {
3848 report_new_stream(file_index, &pkt);
3849 goto discard_packet;
3852 ist = input_streams[ifile->ist_index + pkt.stream_index];
3854 ist->data_size += pkt.size;
3858 goto discard_packet;
3860 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3861 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Debug trace of the packet as delivered by the demuxer (pre-fixup). */
3866 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3867 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3868 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3869 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3870 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3871 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3872 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3873 av_ts2str(input_files[ist->file_index]->ts_offset),
3874 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with <64 pts_wrap_bits. */
3877 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3878 int64_t stime, stime2;
3879 // Correcting starttime based on the enabled streams
3880 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3881 // so we instead do it here as part of discontinuity handling
3882 if ( ist->next_dts == AV_NOPTS_VALUE
3883 && ifile->ts_offset == -is->start_time
3884 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3885 int64_t new_start_time = INT64_MAX;
/* Recompute the start time over non-discarded streams only. */
3886 for (i=0; i<is->nb_streams; i++) {
3887 AVStream *st = is->streams[i];
3888 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3890 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3892 if (new_start_time > is->start_time) {
3893 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3894 ifile->ts_offset = -new_start_time;
3898 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3899 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3900 ist->wrap_correction_done = 1;
/* A timestamp more than half the wrap period past the start is assumed
 * to be pre-wrap: unwrap it and keep watching for further wraps. */
3902 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3903 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3904 ist->wrap_correction_done = 0;
3906 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3907 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3908 ist->wrap_correction_done = 0;
3912 /* add the stream-global side data to the first packet */
3913 if (ist->nb_packets == 1) {
3914 if (ist->st->nb_side_data)
3915 av_packet_split_side_data(&pkt);
3916 for (i = 0; i < ist->st->nb_side_data; i++) {
3917 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries. */
3920 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is handled by autorotation instead of copying. */
3922 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3925 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3929 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the -itsscale factor. */
3933 if (pkt.dts != AV_NOPTS_VALUE)
3934 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3935 if (pkt.pts != AV_NOPTS_VALUE)
3936 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3938 if (pkt.pts != AV_NOPTS_VALUE)
3939 pkt.pts *= ist->ts_scale;
3940 if (pkt.dts != AV_NOPTS_VALUE)
3941 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen ts. */
3943 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3944 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3945 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3946 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3947 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3948 int64_t delta = pkt_dts - ifile->last_ts;
3949 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3950 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3951 ifile->ts_offset -= delta;
3952 av_log(NULL, AV_LOG_DEBUG,
3953 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3954 delta, ifile->ts_offset);
3955 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3956 if (pkt.pts != AV_NOPTS_VALUE)
3957 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift this iteration's timestamps past the previous
 * total duration and track the stream's min/max pts. */
3961 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3962 if (pkt.pts != AV_NOPTS_VALUE) {
3963 pkt.pts += duration;
3964 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3965 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3968 if (pkt.dts != AV_NOPTS_VALUE)
3969 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next_dts. */
3971 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3972 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3973 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3974 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3976 int64_t delta = pkt_dts - ist->next_dts;
/* Formats prone to discontinuities (e.g. MPEG-TS): re-base via
 * ts_offset when the jump exceeds dts_delta_threshold. */
3977 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3978 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3979 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3980 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3981 ifile->ts_offset -= delta;
3982 av_log(NULL, AV_LOG_DEBUG,
3983 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3984 delta, ifile->ts_offset);
3985 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3986 if (pkt.pts != AV_NOPTS_VALUE)
3987 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Other formats: a jump beyond dts_error_threshold is treated as a
 * broken timestamp and dropped (set to AV_NOPTS_VALUE). */
3990 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3991 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3992 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3993 pkt.dts = AV_NOPTS_VALUE;
3995 if (pkt.pts != AV_NOPTS_VALUE){
3996 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3997 delta = pkt_pts - ist->next_dts;
3998 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3999 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4000 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4001 pkt.pts = AV_NOPTS_VALUE;
/* Remember this file's last DTS for the inter-stream check above. */
4007 if (pkt.dts != AV_NOPTS_VALUE)
4008 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug trace of the packet after all timestamp fixups. */
4011 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4012 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4013 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4014 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4015 av_ts2str(input_files[ist->file_index]->ts_offset),
4016 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4019 sub2video_heartbeat(ist, pkt.pts);
4021 process_input_packet(ist, &pkt, 0);
4024 av_packet_unref(&pkt);
4030 * Perform a step of transcoding for the specified filter graph.
4032 * @param[in] graph filter graph to consider
4033 * @param[out] best_ist input stream where a frame would allow to continue
4034 * @return 0 for success, <0 for error
4036 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4039 int nb_requests, nb_requests_max = 0;
4040 InputFilter *ifilter;
/* Ask the graph for the oldest queued frame first; if it produced one,
 * just reap the filter outputs. */
4044 ret = avfilter_graph_request_oldest(graph->graph);
4046 return reap_filters(0);
/* Graph reached EOF: flush the outputs and close every output stream
 * fed by this graph. */
4048 if (ret == AVERROR_EOF) {
4049 ret = reap_filters(1);
4050 for (i = 0; i < graph->nb_outputs; i++)
4051 close_output_stream(graph->outputs[i]->ost);
4054 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs more input. Pick the input filter with the
 * most failed buffer requests, skipping files that are stalled or done. */
4057 for (i = 0; i < graph->nb_inputs; i++) {
4058 ifilter = graph->inputs[i];
4060 if (input_files[ist->file_index]->eagain ||
4061 input_files[ist->file_index]->eof_reached)
4063 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4064 if (nb_requests > nb_requests_max) {
4065 nb_requests_max = nb_requests;
/* No usable input found: mark all of this graph's outputs unavailable
 * so choose_output() skips them for now. */
4071 for (i = 0; i < graph->nb_outputs; i++)
4072 graph->outputs[i]->ost->unavailable = 1;
4078 * Run a single step of transcoding.
4080 * @return 0 for success, <0 for error
4082 static int transcode_step(void)
/* Choose the output stream most in need of data, find the input stream
 * that can feed it (directly or via its filter graph), read/process one
 * input packet, then reap any frames the filters produced. */
4088 ost = choose_output();
4095 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Complex filter graph: let the graph decide which input it wants. */
4100 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Simple path: the output stream maps 1:1 to an input stream. */
4105 av_assert0(ost->source_index >= 0);
4106 ist = input_streams[ost->source_index];
4109 ret = process_input(ist->file_index);
4110 if (ret == AVERROR(EAGAIN)) {
/* The whole file is stalled: mark this output unavailable for now. */
4111 if (input_files[ist->file_index]->eagain)
4112 ost->unavailable = 1;
/* EOF on input is not an error at this level. */
4117 return ret == AVERROR_EOF ? 0 : ret;
4119 return reap_filters(0);
4123 * The following code is the main loop of the file converter
/* Top-level transcode driver: initialize, loop over transcode_step()
 * until no output is needed or an error/signal occurs, then flush
 * decoders, write trailers, print the final report and clean up.
 * Returns 0 on success, a negative AVERROR on failure. */
4125 static int transcode(void)
4128 AVFormatContext *os;
4131 int64_t timer_start;
4132 int64_t total_packets_written = 0;
4134 ret = transcode_init();
4138 if (stdin_interaction) {
4139 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4142 timer_start = av_gettime_relative();
4145 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM/SIGINT or until all outputs are done. */
4149 while (!received_sigterm) {
4150 int64_t cur_time= av_gettime_relative();
4152 /* if 'q' pressed, exits */
4153 if (stdin_interaction)
4154 if (check_keyboard_interaction(cur_time) < 0)
4157 /* check if there's any stream where output is still needed */
4158 if (!need_output()) {
4159 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4163 ret = transcode_step();
4164 if (ret < 0 && ret != AVERROR_EOF) {
4166 av_strerror(ret, errbuf, sizeof(errbuf));
4168 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4172 /* dump report by using the output first video and audio streams */
4173 print_report(0, timer_start, cur_time);
4176 free_input_threads();
4179 /* at the end of stream, we must flush the decoder buffers */
4180 for (i = 0; i < nb_input_streams; i++) {
4181 ist = input_streams[i];
4182 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4183 process_input_packet(ist, NULL, 0);
4190 /* write the trailer if needed and close file */
4191 for (i = 0; i < nb_output_files; i++) {
4192 os = output_files[i]->ctx;
4193 if ((ret = av_write_trailer(os)) < 0) {
4194 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4200 /* dump report by using the first video and audio streams */
4201 print_report(1, timer_start, av_gettime_relative());
4203 /* close each encoder */
4204 for (i = 0; i < nb_output_streams; i++) {
4205 ost = output_streams[i];
4206 if (ost->encoding_needed) {
4207 av_freep(&ost->enc_ctx->stats_in);
4209 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4212 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4213 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4217 /* close each decoder */
4218 for (i = 0; i < nb_input_streams; i++) {
4219 ist = input_streams[i];
4220 if (ist->decoding_needed) {
4221 avcodec_close(ist->dec_ctx);
4222 if (ist->hwaccel_uninit)
4223 ist->hwaccel_uninit(ist->dec_ctx);
4227 av_buffer_unref(&hw_device_ctx);
/* Error/cleanup path (label elided): stop threads and free per-output
 * stream resources. */
4234 free_input_threads();
4237 if (output_streams) {
4238 for (i = 0; i < nb_output_streams; i++) {
4239 ost = output_streams[i];
4242 if (fclose(ost->logfile))
4243 av_log(NULL, AV_LOG_ERROR,
4244 "Error closing logfile, loss of information possible: %s\n",
4245 av_err2str(AVERROR(errno)));
4246 ost->logfile = NULL;
4248 av_freep(&ost->forced_kf_pts);
4249 av_freep(&ost->apad);
4250 av_freep(&ost->disposition);
4251 av_dict_free(&ost->encoder_opts);
4252 av_dict_free(&ost->sws_dict);
4253 av_dict_free(&ost->swr_opts);
4254 av_dict_free(&ost->resample_opts);
/* Return this process's consumed user CPU time in microseconds,
 * via getrusage() (POSIX), GetProcessTimes() (Windows), or — as a last
 * resort — a monotonic wall clock, which is not CPU time. */
4262 static int64_t getutime(void)
4265 struct rusage rusage;
4267 getrusage(RUSAGE_SELF, &rusage);
4268 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4269 #elif HAVE_GETPROCESSTIMES
4271 FILETIME c, e, k, u;
4272 proc = GetCurrentProcess();
4273 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; /10 converts to microseconds. */
4274 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4276 return av_gettime_relative();
/* Return the process's peak memory usage in bytes, or an elided
 * fallback value when neither API is available. */
4280 static int64_t getmaxrss(void)
4282 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4283 struct rusage rusage;
4284 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes.
 * NOTE(review): macOS reports bytes here — the *1024 overstates on
 * that platform; confirm against the target OS. */
4285 return (int64_t)rusage.ru_maxrss * 1024;
4286 #elif HAVE_GETPROCESSMEMORYINFO
4288 PROCESS_MEMORY_COUNTERS memcounters;
4289 proc = GetCurrentProcess();
4290 memcounters.cb = sizeof(memcounters);
4291 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4292 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in '-d' (daemon) mode to silence all
 * libav* logging (body elided here, presumably empty). */
4298 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse the command line,
 * run transcode(), optionally print benchmark data, and exit with an
 * error code reflecting decode errors or received signals. */
4302 int main(int argc, char **argv)
4307 register_exit(ffmpeg_cleanup);
4309 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4311 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4312 parse_loglevel(argc, argv, options);
/* '-d' as the first argument selects daemon mode: logging is silenced
 * via the null callback. */
4314 if(argc>1 && !strcmp(argv[1], "-d")){
4316 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters and network support before
 * option parsing can reference them. */
4321 avcodec_register_all();
4323 avdevice_register_all();
4325 avfilter_register_all();
4327 avformat_network_init();
4329 show_banner(argc, argv, options);
4333 /* parse options and open all input/output files */
4334 ret = ffmpeg_parse_options(argc, argv);
4338 if (nb_output_files <= 0 && nb_input_files == 0) {
4340 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4344 /* file converter / grab */
4345 if (nb_output_files <= 0) {
4346 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4350 // if (nb_input_files == 0) {
4351 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode for the -benchmark report. */
4355 current_time = ti = getutime();
4356 if (transcode() < 0)
4358 ti = getutime() - ti;
4360 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4362 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4363 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the decode error ratio exceeds -max_error_rate. */
4364 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 when interrupted by a signal, else the accumulated code. */
4367 exit_program(received_nb_signals ? 255 : main_return_code);
4368 return main_return_code;