2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
62 #include "libavutil/threadmessage.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for the banner/version output. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily in
 * do_video_stats(). */
114 static FILE *vstats_file;
/* Names of the constants usable in a -force_key_frames expression
 * (initializer elided in this excerpt). */
116 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
/* Runtime state and frame-duplication/drop counters for progress reporting. */
129 static int run_as_daemon  = 0;
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated on first use in
 * do_subtitle_out() and freed in ffmpeg_cleanup(). */
137 static uint8_t *subtitle_out;
139 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
/* Global tables of all input/output files and streams, indexed by the
 * file_index/ist_index fields stored in the stream structures. */
141 InputStream **input_streams = NULL;
142 int nb_input_streams = 0;
143 InputFile   **input_files   = NULL;
144 int nb_input_files   = 0;
146 OutputStream **output_streams = NULL;
147 int nb_output_streams = 0;
148 OutputFile   **output_files   = NULL;
149 int nb_output_files   = 0;
151 FilterGraph **filtergraphs;
156 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe() when
 * restore_tty is set. */
157 static struct termios oldtty;
158 static int restore_tty;
162 static void free_input_threads(void);
166 Convert subtitles to video with alpha to insert them in filter graphs.
167 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank RGB32 canvas of the configured
 * sub2video dimensions and clear its first plane to zero (fully transparent).
 * Returns the av_frame_get_buffer() error code on allocation failure
 * (NOTE: parts of the error path are outside this excerpt). */
170 static int sub2video_get_blank_frame(InputStream *ist)
173 AVFrame *frame = ist->sub2video.frame;
/* Release any previously attached buffer before reallocating. */
175 av_frame_unref(frame);
176 ist->sub2video.frame->width  = ist->sub2video.w;
177 ist->sub2video.frame->height = ist->sub2video.h;
178 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer. */
179 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Only plane 0 exists for packed RGB32; zero it entirely. */
181 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle into the dst RGB32 canvas, expanding
 * the rect's paletted pixels through its palette (r->pict, the pre-AVSubtitle
 * refactor representation).  Rects that are not bitmaps or that fall outside
 * the w x h canvas are warned about and skipped (skip paths elided here). */
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
188 uint32_t *pal, *dst2;
192 if (r->type != SUBTITLE_BITMAP) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
196 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
201 dst += r->y * dst_linesize + r->x * 4;
202 src = r->pict.data[0];
/* data[1] holds the palette: one uint32_t RGBA entry per palette index. */
203 pal = (uint32_t *)r->pict.data[1];
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
/* Palette lookup: paletted source byte -> 32-bit pixel. */
208 *(dst2++) = pal[*(src2++)];
210 src += r->pict.linesize[0];
/* Stamp the current sub2video canvas with pts (recording it as last_pts)
 * and push it into every buffersrc feeding this input stream's filters.
 * KEEP_REF lets the same frame be sent again on the next heartbeat. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into the
 * filtergraph.  With sub == NULL (heartbeat/flush path) an empty canvas is
 * pushed at the previous end_pts to clear the overlay. */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 int w = ist->sub2video.w, h = ist->sub2video.h;
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* Display times are in ms; convert start/end to the stream time base. */
239 pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
/* sub == NULL branch: reuse the last end_pts as the clear time. */
245 pts       = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst          = frame->data    [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams flowing: whenever a frame at pts is read from the
 * same input file, re-send the current (or a cleared) subtitle canvas to any
 * sub2video stream whose filters are starving, so overlay filters never
 * block waiting for a subtitle frame. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video. */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's end (or nothing shown yet): push a clear. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
/* Only re-send when at least one downstream filter actually asked. */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: push one final clearing
 * canvas if a subtitle was still on screen, then signal EOF to every
 * buffersrc by sending a NULL ref. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
/* NULL marks EOF on the buffer source (old av_buffersrc_add_ref API). */
298 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
301 /* end of sub2video hack */
/* Async-signal-safe terminal teardown: restore the saved tty attributes
 * (conditional guard around tcsetattr is elided in this excerpt). */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the handlers and the main loop.
 * NOTE(review): plain volatile ints, not sig_atomic_t/atomics — matches the
 * historical ffmpeg.c style. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Signal handler: record the signal and count deliveries; after more than
 * three signals, print a message with write() (async-signal-safe) and
 * hard-exit (exit call elided in this excerpt). */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
/* Close/logoff/shutdown behave like SIGTERM, then we stall below. */
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior of the terminal-setup function (presumably
 * term_init(); its header is outside this excerpt).  Puts the tty in a raw-ish
 * mode for single-key reads and installs the signal handlers. */
376 istty = isatty(0) && isatty(2);
378 if (istty && tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keys arrive unmodified. */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* Turn off echo and canonical (line-buffered) mode. */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
/* CPU-time limit exceeded is treated like a termination request. */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* POSIX path polls stdin with select(); the Windows path below uses
 * PeekNamedPipe()/console APIs instead. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 #    if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is a pipe rather than a console. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
438 if (stdin->_cnt > 0) {
443 /* When running under a GUI, you will end here. */
444 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
445 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * Before init completes one signal suffices (transcode_init_done == 0);
 * afterwards a second signal is required. */
463 static int decode_interrupt_cb(void *ctx)
465 return received_nb_signals > transcode_init_done;
/* Callback struct passed to libavformat for all opened contexts. */
468 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, run at exit: free every filtergraph, output file/stream,
 * input file/stream and associated buffers, then report how the run ended.
 * 'ret' is the pending exit status (several guards/closing braces are elided
 * in this excerpt). */
470 static void ffmpeg_cleanup(int ret)
/* Benchmark mode: report peak resident set size in KiB. */
475 int maxrss = getmaxrss() / 1024;
476 printf("bench: maxrss=%ikB\n", maxrss);
479 for (i = 0; i < nb_filtergraphs; i++) {
480 FilterGraph *fg = filtergraphs[i];
481 avfilter_graph_free(&fg->graph);
482 for (j = 0; j < fg->nb_inputs; j++) {
483 av_freep(&fg->inputs[j]->name);
484 av_freep(&fg->inputs[j]);
486 av_freep(&fg->inputs);
487 for (j = 0; j < fg->nb_outputs; j++) {
488 av_freep(&fg->outputs[j]->name);
489 av_freep(&fg->outputs[j]);
491 av_freep(&fg->outputs);
492 av_freep(&fg->graph_desc);
494 av_freep(&filtergraphs[i]);
496 av_freep(&filtergraphs);
498 av_freep(&subtitle_out);
/* Close muxers: only close the AVIO handle for formats that own a file. */
501 for (i = 0; i < nb_output_files; i++) {
502 OutputFile *of = output_files[i];
503 AVFormatContext *s = of->ctx;
504 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
506 avformat_free_context(s);
507 av_dict_free(&of->opts);
509 av_freep(&output_files[i]);
511 for (i = 0; i < nb_output_streams; i++) {
512 OutputStream *ost = output_streams[i];
/* Walk and close the (old API) bitstream-filter chain. */
513 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
515 AVBitStreamFilterContext *next = bsfc->next;
516 av_bitstream_filter_close(bsfc);
519 ost->bitstream_filters = NULL;
520 av_frame_free(&ost->filtered_frame);
521 av_frame_free(&ost->last_frame);
523 av_parser_close(ost->parser);
525 av_freep(&ost->forced_keyframes);
526 av_expr_free(ost->forced_keyframes_pexpr);
527 av_freep(&ost->avfilter);
528 av_freep(&ost->logfile_prefix);
530 av_freep(&ost->audio_channels_map);
531 ost->audio_channels_mapped = 0;
533 avcodec_free_context(&ost->enc_ctx);
535 av_freep(&output_streams[i]);
/* Stop reader threads before tearing down the inputs they feed. */
538 free_input_threads();
540 for (i = 0; i < nb_input_files; i++) {
541 avformat_close_input(&input_files[i]->ctx);
542 av_freep(&input_files[i]);
544 for (i = 0; i < nb_input_streams; i++) {
545 InputStream *ist = input_streams[i];
547 av_frame_free(&ist->decoded_frame);
548 av_frame_free(&ist->filter_frame);
549 av_dict_free(&ist->decoder_opts);
550 avsubtitle_free(&ist->prev_sub.subtitle);
551 av_frame_free(&ist->sub2video.frame);
552 av_freep(&ist->filters);
553 av_freep(&ist->hwaccel_device);
555 avcodec_free_context(&ist->dec_ctx);
557 av_freep(&input_streams[i]);
562 av_freep(&vstats_filename);
564 av_freep(&input_streams);
565 av_freep(&input_files);
566 av_freep(&output_streams);
567 av_freep(&output_files);
571 avformat_network_deinit();
/* Distinguish signal-driven exit from a genuine conversion failure. */
573 if (received_sigterm) {
574 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
575 (int) received_sigterm);
576 } else if (ret && transcode_init_done) {
577 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b
 * (used to strip already-consumed options before the leftover check). */
583 void remove_avoptions(AVDictionary **a, AVDictionary *b)
585 AVDictionaryEntry *t = NULL;
587 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
/* Setting a key to NULL deletes it. */
588 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal error if any option in m was not consumed by the codec/format
 * (i.e. the user passed an unknown option). */
592 void assert_avoptions(AVDictionary *m)
594 AVDictionaryEntry *t;
595 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
596 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is used without -strict experimental
 * (body elided in this excerpt). */
601 static void abort_codec_experimental(AVCodec *c, int encoder)
/* In -benchmark_all mode, print the user CPU time elapsed since the last
 * call, labelled with the printf-style message; fmt == NULL only resets the
 * reference time. */
606 static void update_benchmark(const char *fmt, ...)
608 if (do_benchmark_all) {
609 int64_t t = getutime();
615 vsnprintf(buf, sizeof(buf), fmt, va);
617 printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags for ost itself,
 * 'others' flags for all remaining streams (used after a fatal mux error). */
623 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
626 for (i = 0; i < nb_output_streams; i++) {
627 OutputStream *ost2 = output_streams[i];
628 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final packet path to the muxer: copy extradata for streamcopy, enforce the
 * -frames limit for non-video, run the (old API) bitstream filter chain,
 * sanitize DTS/PTS monotonicity, then interleave-write.  Several braces and
 * error branches are elided in this excerpt. */
632 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
634 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Use the encoder context when encoding, the muxer stream context when
 * streamcopying. */
635 AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate encoder extradata to the muxer stream if not yet set. */
638 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
639 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
640 if (ost->st->codec->extradata) {
641 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
642 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* VSYNC_DROP (or negative async) means timestamps are regenerated by the
 * muxer, so drop them here. */
646 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
647 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
648 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
651 * Audio encoders may split the packets --  #frames in != #packets out.
652 * But there is no reordering, so we can limit the number of output packets
653 * by simply dropping them here.
654 * Counting encoded video frames needs to be done separately because of
655 * reordering, see do_video_out()
657 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
658 if (ost->frame_number >= ost->max_frames) {
666 av_packet_split_side_data(pkt);
/* Bitstream-filter loop (deprecated av_bitstream_filter_* API). */
669 AVPacket new_pkt = *pkt;
670 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
673 int a = av_bitstream_filter_filter(bsfc, avctx,
674 bsf_arg ? bsf_arg->value : NULL,
675 &new_pkt.data, &new_pkt.size,
676 pkt->data, pkt->size,
677 pkt->flags & AV_PKT_FLAG_KEY);
678 if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
679 uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
681 memcpy(t, new_pkt.data, new_pkt.size);
682 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
690 pkt->side_data = NULL;
691 pkt->side_data_elems = 0;
/* Wrap the filtered data in a refcounted buffer so the muxer can own it. */
693 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
694 av_buffer_default_free, NULL, 0);
699 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
700 bsfc->filter->name, pkt->stream_index,
701 avctx->codec ? avctx->codec->name : "copy");
/* Timestamp sanitation, skipped for formats with no timestamps. */
711 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
712 if (pkt->dts != AV_NOPTS_VALUE &&
713 pkt->pts != AV_NOPTS_VALUE &&
714 pkt->dts > pkt->pts) {
715 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
717 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts + 1}. */
719 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
720 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
721 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
724 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
725 pkt->dts != AV_NOPTS_VALUE &&
726 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; non-strict allow equal. */
727 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
728 if (pkt->dts < max) {
729 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
730 av_log(s, loglevel, "Non-monotonous DTS in output stream "
731 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
732 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
734 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
737 av_log(s, loglevel, "changing to %"PRId64". This may result "
738 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to 'max'. */
740 if(pkt->pts >= pkt->dts)
741 pkt->pts = FFMAX(pkt->pts, max);
746 ost->last_mux_dts = pkt->dts;
748 ost->data_size += pkt->size;
749 ost->packets_written++;
751 pkt->stream_index = ost->index;
/* debug_ts tracing of what goes into the muxer. */
754 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
755 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
756 av_get_media_type_string(ost->enc_ctx->codec_type),
757 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
758 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
763 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure: record error and shut down all output streams. */
765 print_error("av_interleaved_write_frame()", ret);
766 main_return_code = 1;
767 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark this stream's encoder finished and, when applicable, shrink the
 * output file's recording_time to the stream's current end time so other
 * streams stop at the same point. */
772 static void close_output_stream(OutputStream *ost)
774 OutputFile *of = output_files[ost->file_index];
776 ost->finished |= ENCODER_FINISHED;
778 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
779 of->recording_time = FFMIN(of->recording_time, end);
/* Return whether encoding may continue on this stream: once the stream's
 * elapsed time (sync_opts - first_pts) reaches the file's -t limit, close
 * the stream and (in the elided tail) return 0. */
783 static int check_recording_time(OutputStream *ost)
785 OutputFile *of = output_files[ost->file_index];
787 if (of->recording_time != INT64_MAX &&
788 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
789 AV_TIME_BASE_Q) >= 0) {
790 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet to write_frame().
 * Maintains ost->sync_opts as a running sample counter used as the next
 * default pts. */
796 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
799 AVCodecContext *enc = ost->enc_ctx;
803 av_init_packet(&pkt);
/* Stop encoding once the -t recording time is reached. */
807 if (!check_recording_time(ost))
/* Fall back to the sample counter when the frame has no pts (or async<0). */
810 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
811 frame->pts = ost->sync_opts;
812 ost->sync_opts = frame->pts + frame->nb_samples;
813 ost->samples_encoded += frame->nb_samples;
814 ost->frames_encoded++;
816 av_assert0(pkt.size || !pkt.data);
817 update_benchmark(NULL);
/* debug_ts tracing of what goes into the encoder. */
819 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
820 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
821 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
822 enc->time_base.num, enc->time_base.den);
825 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
826 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
829 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Convert packet timestamps from encoder to muxer stream time base. */
832 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
835 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
836 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
837 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
838 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
841 write_frame(s, &pkt, ost);
/* Encode an AVSubtitle into one or two packets (DVB subtitles need a second
 * packet to clear the screen) and mux them, with -ss/-t timestamp shifting
 * applied.  Several declarations/braces are elided in this excerpt. */
845 static void do_subtitle_out(AVFormatContext *s,
/* Fixed-size scratch buffer for the encoded subtitle. */
850 int subtitle_out_max_size = 1024 * 1024;
851 int subtitle_out_size, nb, i;
856 if (sub->pts == AV_NOPTS_VALUE) {
857 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared output buffer (freed in ffmpeg_cleanup). */
866 subtitle_out = av_malloc(subtitle_out_max_size);
868 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
873 /* Note: DVB subtitle need one packet to draw them and one other
874 packet to clear them */
875 /* XXX: signal it in the codec context ? */
876 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
881 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
883 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
884 pts -= output_files[ost->file_index]->start_time;
885 for (i = 0; i < nb; i++) {
886 unsigned save_num_rects = sub->num_rects;
888 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
889 if (!check_recording_time(ost))
893 // start_display_time is required to be 0
/* Fold start_display_time into pts so the encoder sees start == 0. */
894 sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
895 sub->end_display_time  -= sub->start_display_time;
896 sub->start_display_time = 0;
900 ost->frames_encoded++;
902 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
903 subtitle_out_max_size, sub);
/* Restore num_rects (the second DVB pass encodes zero rects to clear). */
905 sub->num_rects = save_num_rects;
906 if (subtitle_out_size < 0) {
907 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
911 av_init_packet(&pkt);
912 pkt.data = subtitle_out;
913 pkt.size = subtitle_out_size;
914 pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
915 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
916 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
917 /* XXX: the pts correction is handled here. Maybe handling
918 it in the codec would be better */
/* 90 kHz units: ms * 90. Draw packet offset by start, clear by end. */
920 pkt.pts += 90 * sub->start_display_time;
922 pkt.pts += 90 * sub->end_display_time;
925 write_frame(s, &pkt, ost);
/* Encode one video frame, applying the -vsync policy: decide how many copies
 * (nb_frames) and how many repeats of the previous frame (nb0_frames) to
 * emit, handle forced keyframes, RAWPICTURE passthrough, and two-pass stats.
 * Many declarations, braces and case labels are elided in this excerpt. */
929 static void do_video_out(AVFormatContext *s,
931 AVFrame *next_picture,
934 int ret, format_video_sync;
936 AVCodecContext *enc = ost->enc_ctx;
937 AVCodecContext *mux_enc = ost->st->codec;
938 int nb_frames, nb0_frames, i;
939 double delta, delta0;
942 InputStream *ist = NULL;
943 AVFilterContext *filter = ost->filter->filter;
945 if (ost->source_index >= 0)
946 ist = input_streams[ost->source_index];
/* Derive the frame duration (in encoder ticks) from the filter frame rate. */
948 if (filter->inputs[0]->frame_rate.num > 0 &&
949 filter->inputs[0]->frame_rate.den > 0)
950 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
952 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
953 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
955 if (!ost->filters_script &&
959 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
960 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path (next_picture == NULL, presumably): median-predict how many
 * repeats of the last frame to emit from the recent history. */
965 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
966 ost->last_nb0_frames[1],
967 ost->last_nb0_frames[2]);
/* delta0: distance from expected pts; delta: same including this frame. */
969 delta0 = sync_ipts - ost->sync_opts;
970 delta  = delta0 + duration;
972 /* by default, we output a single frame */
/* Resolve VSYNC_AUTO to a concrete sync mode based on the muxer. */
976 format_video_sync = video_sync_method;
977 if (format_video_sync == VSYNC_AUTO) {
978 if(!strcmp(s->oformat->name, "avi")) {
979 format_video_sync = VSYNC_VFR;
981 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
983 && format_video_sync == VSYNC_CFR
984 && input_files[ist->file_index]->ctx->nb_streams == 1
985 && input_files[ist->file_index]->input_ts_offset == 0) {
986 format_video_sync = VSYNC_VSCFR;
988 if (format_video_sync == VSYNC_CFR && copy_ts) {
989 format_video_sync = VSYNC_VSCFR;
/* Frame arrived earlier than expected: clip its duration to compensate. */
995 format_video_sync != VSYNC_PASSTHROUGH &&
996 format_video_sync != VSYNC_DROP) {
997 double cor = FFMIN(-delta0, duration);
999 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1001 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1007 switch (format_video_sync) {
/* VSCFR: skip initial duplicates, then lock to the input timestamps. */
1009 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1010 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1013 ost->sync_opts = lrint(sync_ipts);
1016 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: drop when within the drop threshold, drop on large negative delta,
 * duplicate on large positive delta. */
1017 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1019 } else if (delta < -1.1)
1021 else if (delta > 1.1) {
1022 nb_frames = lrintf(delta);
1024 nb0_frames = lrintf(delta0 - 0.6);
1030 else if (delta > 0.6)
1031 ost->sync_opts = lrint(sync_ipts);
1034 case VSYNC_PASSTHROUGH:
1035 ost->sync_opts = lrint(sync_ipts);
/* Respect the -frames limit and keep nb0 <= nb. */
1042 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1043 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the duplicate-history window and record this decision. */
1045 memmove(ost->last_nb0_frames + 1,
1046 ost->last_nb0_frames,
1047 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1048 ost->last_nb0_frames[0] = nb0_frames;
1050 if (nb0_frames == 0 && ost->last_droped) {
1052 av_log(NULL, AV_LOG_VERBOSE,
1053 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1054 ost->frame_number, ost->st->index, ost->last_frame->pts);
1056 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1057 if (nb_frames > dts_error_threshold * 30) {
1058 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1062 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1063 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1065 ost->last_droped = nb_frames == nb0_frames && next_picture;
1067 /* duplicates frame if needed */
1068 for (i = 0; i < nb_frames; i++) {
1069 AVFrame *in_picture;
1070 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame. */
1074 if (i < nb0_frames && ost->last_frame) {
1075 in_picture = ost->last_frame;
1077 in_picture = next_picture;
1082 in_picture->pts = ost->sync_opts;
1085 if (!check_recording_time(ost))
1087 if (ost->frame_number >= ost->max_frames)
/* RAWPICTURE path: pass the AVPicture pointer through without encoding. */
1091 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1092 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1093 /* raw pictures are written as AVPicture structure to
1094 avoid any copies. We support temporarily the older
1096 if (in_picture->interlaced_frame)
1097 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1099 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1100 pkt.data   = (uint8_t *)in_picture;
1101 pkt.size   =  sizeof(AVPicture);
1102 pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1103 pkt.flags |= AV_PKT_FLAG_KEY;
1105 write_frame(s, &pkt, ost);
1107 int got_packet, forced_keyframe = 0;
/* Normal encode path starts here. */
1110 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1111 ost->top_field_first >= 0)
1112 in_picture->top_field_first = !!ost->top_field_first;
1114 if (in_picture->interlaced_frame) {
/* MJPEG stores fields per-frame (TT/BB), others per-field (TB/BT). */
1115 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1116 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1118 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1120 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1122 in_picture->quality = enc->global_quality;
1123 in_picture->pict_type = 0;
1125 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1126 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* Forced keyframes: explicit pts list, expression, or "source". */
1127 if (ost->forced_kf_index < ost->forced_kf_count &&
1128 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1129 ost->forced_kf_index++;
1130 forced_keyframe = 1;
1131 } else if (ost->forced_keyframes_pexpr) {
1133 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1134 res = av_expr_eval(ost->forced_keyframes_pexpr,
1135 ost->forced_keyframes_expr_const_values, NULL);
1136 av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1137 ost->forced_keyframes_expr_const_values[FKF_N],
1138 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1139 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1140 ost->forced_keyframes_expr_const_values[FKF_T],
1141 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1144 forced_keyframe = 1;
1145 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1146 ost->forced_keyframes_expr_const_values[FKF_N];
1147 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1148 ost->forced_keyframes_expr_const_values[FKF_T];
1149 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1152 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1153 } else if (   ost->forced_keyframes
1154 && !strncmp(ost->forced_keyframes, "source", 6)
1155 && in_picture->key_frame==1) {
1156 forced_keyframe = 1;
1159 if (forced_keyframe) {
1160 in_picture->pict_type = AV_PICTURE_TYPE_I;
1161 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1164 update_benchmark(NULL);
1166 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1167 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1168 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1169 enc->time_base.num, enc->time_base.den);
1172 ost->frames_encoded++;
1174 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1175 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1177 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1183 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1184 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1185 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1186 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may omit pts; substitute the sync counter. */
1189 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1190 pkt.pts = ost->sync_opts;
1192 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1195 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1196 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1197 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1198 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1201 frame_size = pkt.size;
1202 write_frame(s, &pkt, ost);
1204 /* if two pass, output log */
1205 if (ost->logfile && enc->stats_out) {
1206 fprintf(ost->logfile, "%s", enc->stats_out);
1212 * For video, number of frames in == number of packets out.
1213 * But there may be reordering, so we can't throw away frames on encoder
1214 * flush, we need to limit them here, before they go into encoder.
1216 ost->frame_number++;
1218 if (vstats_filename && frame_size)
1219 do_video_stats(ost, frame_size);
/* Remember this frame so it can be re-encoded for duplicates next call. */
1222 if (!ost->last_frame)
1223 ost->last_frame = av_frame_alloc();
1224 av_frame_unref(ost->last_frame);
1226 av_frame_ref(ost->last_frame, next_picture);
1228 av_frame_free(&ost->last_frame);
/* Convert a normalized mean squared error d into PSNR in dB:
 * -10 * log10(d). */
1231 static double psnr(double d)
1233 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame video statistics (-vstats) to vstats_file:
 * frame number, quantizer, optional PSNR, sizes, time, bitrates and picture
 * type.  Opens the file on first call; error paths are elided here. */
1236 static void do_video_stats(OutputStream *ost, int frame_size)
1238 AVCodecContext *enc;
1240 double ti1, bitrate, avg_bitrate;
1242 /* this is executed just the first time do_video_stats is called */
1244 vstats_file = fopen(vstats_filename, "w");
1252 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1253 frame_number = ost->st->nb_frames;
/* coded_frame->quality is in lambda units; convert to a qscale value. */
1254 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1255 if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
1256 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1258 fprintf(vstats_file,"f_size= %6d ", frame_size);
1259 /* compute pts value */
1260 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous and average bitrates in kbit/s. */
1264 bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1265 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1266 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1267 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1268 fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
/* Mark this output stream — and (under a condition elided in this excerpt)
 * every stream of the same output file — as fully finished for both encoder
 * and muxer. */
1272 static void finish_output_stream(OutputStream *ost)
1274 OutputFile *of = output_files[ost->file_index];
1277 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1280 for (i = 0; i < of->ctx->nb_streams; i++)
1281 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1286  * Get and encode new output from any of the filtergraphs, without causing
1289  * @return 0 for success, <0 for severe errors
/* Drain every configured buffersink: pull filtered frames (non-blocking,
 * AV_BUFFERSINK_FLAG_NO_REQUEST), rescale their timestamps into the encoder
 * time base, and hand them to do_video_out()/do_audio_out().  With
 * flush != 0, an EOF from a video sink triggers a final NULL-frame flush. */
1291 static int reap_filters(int flush)
1293     AVFrame *filtered_frame = NULL;
1296     /* Reap all buffers present in the buffer sinks */
1297     for (i = 0; i < nb_output_streams; i++) {
1298         OutputStream *ost = output_streams[i];
1299         OutputFile    *of = output_files[ost->file_index];
1300         AVFilterContext *filter;
1301         AVCodecContext *enc = ost->enc_ctx;
1306         filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive filter output */
1308         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1309             return AVERROR(ENOMEM);
1311         filtered_frame = ost->filtered_frame;
1314             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1315             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1316                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN (no frame ready) and EOF are expected; anything else is logged */
1318                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1319                     av_log(NULL, AV_LOG_WARNING,
1320                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1321                 } else if (flush && ret == AVERROR_EOF) {
/* flush the video encoder with a NULL frame once the sink signals EOF */
1322                     if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1323                         do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1327             if (ost->finished) {
1328                 av_frame_unref(filtered_frame);
1331             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1332                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1333                 AVRational tb = enc->time_base;
/* widen the time base denominator by up to 16 bits so float_pts keeps
 * sub-tick precision after the rescale */
1334                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1336                 tb.den <<= extra_bits;
1338                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1339                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1340                 float_pts /= 1 << extra_bits;
1341                 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1342                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1344                 filtered_frame->pts =
1345                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1346                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1348             //if (ost->source_index >= 0)
1349             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1351             switch (filter->inputs[0]->type) {
1352             case AVMEDIA_TYPE_VIDEO:
/* without an explicit -aspect, propagate the SAR coming out of the filters */
1353                 if (!ost->frame_aspect_ratio.num)
1354                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1357                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1358                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1360                            enc->time_base.num, enc->time_base.den);
1363                 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1365             case AVMEDIA_TYPE_AUDIO:
/* encoders without PARAM_CHANGE support cannot cope with a channel-count
 * change mid-stream; treat it as a hard error */
1366                 if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1367                     enc->channels != av_frame_get_channels(filtered_frame)) {
1368                     av_log(NULL, AV_LOG_ERROR,
1369                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1372                 do_audio_out(of->ctx, ost, filtered_frame);
1375                 // TODO support subtitle filters
1379             av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then verbose per-stream demux/decode and
 * encode/mux statistics, and a warning if nothing was encoded at all.
 * total_size is the final output size in bytes (<0 if unknown). */
1386 static void print_final_stats(int64_t total_size)
1388     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1389     uint64_t subtitle_size = 0;
1390     uint64_t data_size = 0;
/* -1 means "overhead unknown"; printed as "unknown" below */
1391     float percent = -1.0;
1395     for (i = 0; i < nb_output_streams; i++) {
1396         OutputStream *ost = output_streams[i];
1397         switch (ost->enc_ctx->codec_type) {
1398             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1399             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1400             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1401             default:                 other_size += ost->data_size; break;
1403         extra_size += ost->enc_ctx->extradata_size;
1404         data_size  += ost->data_size;
/* NOTE(review): pass-1-only streams appear to be excluded here; the
 * branch body is among the elided lines — confirm against full source */
1405         if (   (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1406             != CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payload */
1410     if (data_size && total_size>0 && total_size >= data_size)
1411         percent = 100.0 * (total_size - data_size) / data_size;
1413     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1414            video_size / 1024.0,
1415            audio_size / 1024.0,
1416            subtitle_size / 1024.0,
1417            other_size / 1024.0,
1418            extra_size / 1024.0);
1420         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1422         av_log(NULL, AV_LOG_INFO, "unknown");
1423     av_log(NULL, AV_LOG_INFO, "\n");
1425     /* print verbose per-stream stats */
1426     for (i = 0; i < nb_input_files; i++) {
1427         InputFile *f = input_files[i];
/* shadows the parameter intentionally: per-file running totals */
1428         uint64_t total_packets = 0, total_size = 0;
1430         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1431                i, f->ctx->filename);
1433         for (j = 0; j < f->nb_streams; j++) {
1434             InputStream *ist = input_streams[f->ist_index + j];
1435             enum AVMediaType type = ist->dec_ctx->codec_type;
1437             total_size    += ist->data_size;
1438             total_packets += ist->nb_packets;
1440             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1441                    i, j, media_type_string(type));
1442             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1443                    ist->nb_packets, ist->data_size);
1445             if (ist->decoding_needed) {
1446                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1447                        ist->frames_decoded);
1448                 if (type == AVMEDIA_TYPE_AUDIO)
1449                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1450                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1453             av_log(NULL, AV_LOG_VERBOSE, "\n");
1456         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1457                total_packets, total_size);
1460     for (i = 0; i < nb_output_files; i++) {
1461         OutputFile *of = output_files[i];
/* again shadows the parameter: per-output-file running totals */
1462         uint64_t total_packets = 0, total_size = 0;
1464         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1465                i, of->ctx->filename);
1467         for (j = 0; j < of->ctx->nb_streams; j++) {
1468             OutputStream *ost = output_streams[of->ost_index + j];
1469             enum AVMediaType type = ost->enc_ctx->codec_type;
1471             total_size    += ost->data_size;
1472             total_packets += ost->packets_written;
1474             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1475                    i, j, media_type_string(type));
1476             if (ost->encoding_needed) {
1477                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1478                        ost->frames_encoded);
1479                 if (type == AVMEDIA_TYPE_AUDIO)
1480                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1481                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1484             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1485                    ost->packets_written, ost->data_size);
1487             av_log(NULL, AV_LOG_VERBOSE, "\n");
1490         av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1491                total_packets, total_size);
/* nothing at all was produced: likely bad trim options, warn the user */
1493     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1494         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1496             av_log(NULL, AV_LOG_WARNING, "\n");
1498             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Build and emit the periodic (and final) progress line: frame count, fps,
 * quantizer, QP histogram, PSNR, size, time, bitrate, dup/drop counts.
 * Output goes to stderr / av_log and, key=value formatted, to the optional
 * -progress avio stream.  Rate-limited to one update per 500ms via static
 * last_time; is_last_report forces output and appends final stats.
 * NOTE(review): this view elides many lines (buf declaration, q/vid init,
 * qp handling, pts clamping); comments cover visible code only. */
1503 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1506     AVBPrint buf_script;
1508     AVFormatContext *oc;
1510     AVCodecContext *enc;
1511     int frame_number, vid, i;
1513     int64_t pts = INT64_MIN;
/* static: persists across calls to implement the 500ms rate limit */
1514     static int64_t last_time = -1;
/* histogram of quantizer values, indexed by QP (H.264 range 0..51) */
1515     static int qp_histogram[52];
1516     int hours, mins, secs, us;
/* nothing to report to: neither console stats nor -progress requested */
1518     if (!print_stats && !is_last_report && !progress_avio)
1521     if (!is_last_report) {
1522         if (last_time == -1) {
1523             last_time = cur_time;
/* throttle: at most one report every 500ms of wall-clock time */
1526         if ((cur_time - last_time) < 500000)
1528         last_time = cur_time;
1532     oc = output_files[0]->ctx;
1534     total_size = avio_size(oc->pb);
1535     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1536         total_size = avio_tell(oc->pb);
1540     av_bprint_init(&buf_script, 0, 1);
1541     for (i = 0; i < nb_output_streams; i++) {
1543         ost = output_streams[i];
1545         if (!ost->stream_copy && enc->coded_frame)
1546             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* vid tracks whether a video stream was already reported this pass */
1547         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1548             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1549             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1550                        ost->file_index, ost->index, q);
/* first video stream: print the full frame/fps/q block */
1552         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553             float fps, t = (cur_time-timer_start) / 1000000.0;
1555             frame_number = ost->frame_number;
1556             fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" yields precision 1 below 9.95 fps, 0 above */
1557             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1558                      frame_number, fps < 9.95, fps, q);
1559             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1560             av_bprintf(&buf_script, "fps=%.1f\n", fps);
1561             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1562                        ost->file_index, ost->index, q);
/* 'L' marks the last (flush) report */
1564                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1568                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* print the QP histogram as log2-compressed hex digits */
1570                 for (j = 0; j < 32; j++)
1571                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1573             if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1575                 double error, error_sum = 0;
1576                 double scale, scale_sum = 0;
1578                 char type[3] = { 'Y','U','V' };
1579                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1580                 for (j = 0; j < 3; j++) {
1581                     if (is_last_report) {
/* final report: use the cumulative error over all frames */
1582                         error = enc->error[j];
1583                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1585                         error = enc->coded_frame->error[j];
1586                         scale = enc->width * enc->height * 255.0 * 255.0;
1592                     p = psnr(error / scale);
1593                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* "type[j] | 32" lowercases the plane letter for the script output */
1594                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1595                                ost->file_index, ost->index, type[j] | 32, p);
1597                 p = psnr(error_sum / scale_sum);
1598                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1599                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1600                            ost->file_index, ost->index, p);
1604         /* compute min output value */
1605         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1606             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1607                                           ost->st->time_base, AV_TIME_BASE_Q));
1609             nb_frames_drop += ost->last_droped;
1612     secs = FFABS(pts) / AV_TIME_BASE;
1613     us = FFABS(pts) % AV_TIME_BASE;
/* bitrate in kbit/s from total size and elapsed output time; -1 = unknown */
1619     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1621     if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1623     else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624                                  "size=%8.0fkB time=", total_size / 1024.0);
1626         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628              "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629              (100 * us) / AV_TIME_BASE);
1632         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633         av_bprintf(&buf_script, "bitrate=N/A\n");
1635         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1639     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641     av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642     av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643                hours, mins, secs, us);
1645     if (nb_frames_dup || nb_frames_drop)
1646         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647                 nb_frames_dup, nb_frames_drop);
1648     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1651     if (print_stats || is_last_report) {
/* intermediate reports overwrite the same console line with '\r' */
1652         const char end = is_last_report ? '\n' : '\r';
1653         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1654             fprintf(stderr, "%s    %c", buf, end);
1656             av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
1661     if (progress_avio) {
1662         av_bprintf(&buf_script, "progress=%s\n",
1663                    is_last_report ? "end" : "continue");
1664         avio_write(progress_avio, buf_script.str,
1665                    FFMIN(buf_script.len, buf_script.size - 1));
1666         avio_flush(progress_avio);
1667         av_bprint_finalize(&buf_script, NULL);
1668         if (is_last_report) {
1669             avio_closep(&progress_avio);
1674         print_final_stats(total_size);
/* Drain every active encoder at end of input: repeatedly call the encoder
 * with a NULL frame until it stops producing packets, writing each flushed
 * packet to its muxer.  Streams that do raw/PCM passthrough-style encoding
 * are skipped.  NOTE(review): the inner drain loop, desc setup and
 * stop_encoding handling are partly elided in this view. */
1677 static void flush_encoders(void)
1681     for (i = 0; i < nb_output_streams; i++) {
1682         OutputStream   *ost = output_streams[i];
1683         AVCodecContext *enc = ost->enc_ctx;
1684         AVFormatContext *os = output_files[ost->file_index]->ctx;
1685         int stop_encoding = 0;
1687         if (!ost->encoding_needed)
/* frame_size <= 1 audio codecs (e.g. PCM) buffer nothing to flush */
1690         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
/* rawvideo into a RAWPICTURE muxer never buffers frames either */
1692         if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1696             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1699             switch (enc->codec_type) {
1700             case AVMEDIA_TYPE_AUDIO:
1701                 encode = avcodec_encode_audio2;
1704             case AVMEDIA_TYPE_VIDEO:
1705                 encode = avcodec_encode_video2;
1716                 av_init_packet(&pkt);
1720                 update_benchmark(NULL);
/* NULL frame = flush request; got_packet tells us if output remains */
1721                 ret = encode(enc, &pkt, NULL, &got_packet);
1722                 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1724                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
/* append 2-pass rate-control stats emitted during the flush */
1727                 if (ost->logfile && enc->stats_out) {
1728                     fprintf(ost->logfile, "%s", enc->stats_out);
1734                 if (ost->finished & MUXER_FINISHED) {
1735                     av_free_packet(&pkt);
1738                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* save size before write_frame(), which may consume the packet */
1739                 pkt_size = pkt.size;
1740                 write_frame(os, &pkt, ost);
1741                 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1742                     do_video_stats(ost, pkt_size);
1753  * Check whether a packet from ist should be written into ost at this time
/* Returns non-zero (on an elided line) when the packet may be written:
 * the output stream must be fed by exactly this input stream, and the
 * input timestamp must have reached the output file's -ss start time. */
1755 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1757     OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files */
1758     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1760     if (ost->source_index != ist_index)
/* still before the requested output start time: not yet */
1766     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * pts/dts/duration from the input stream time base to the output stream
 * time base (offset by the output -ss start time), honor keyframe /
 * copy-prior-start gating and recording-time limits, optionally run the
 * parser-level bitstream fixup, then hand the packet to write_frame().
 * NOTE(review): opkt/pict declarations and some early-return bodies are
 * elided in this view. */
1772 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1774     OutputFile *of = output_files[ost->file_index];
1775     InputFile   *f = input_files [ist->file_index];
1776     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1777     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1778     int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1782     av_init_packet(&opkt);
/* by default, drop non-key packets before the first written frame */
1784     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1785         !ost->copy_initial_nonkeyframes)
1788     if (pkt->pts == AV_NOPTS_VALUE) {
/* no pts on the packet: fall back to the stream-level ist->pts */
1789         if (!ost->frame_number && ist->pts < start_time &&
1790             !ost->copy_prior_start)
1793         if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1794             !ost->copy_prior_start)
/* output file -t reached: close this output stream */
1798     if (of->recording_time != INT64_MAX &&
1799         ist->pts >= of->recording_time + start_time) {
1800         close_output_stream(ost);
/* per-input-file -t limit, measured from the (possibly shifted) start */
1804     if (f->recording_time != INT64_MAX) {
1805         start_time = f->ctx->start_time;
1806         if (f->start_time != AV_NOPTS_VALUE)
1807             start_time += f->start_time;
1808         if (ist->pts >= f->recording_time + start_time) {
1809             close_output_stream(ost);
1814     /* force the input stream PTS */
1815     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1818     if (pkt->pts != AV_NOPTS_VALUE)
1819         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1821         opkt.pts = AV_NOPTS_VALUE;
1823     if (pkt->dts == AV_NOPTS_VALUE)
/* synthesize a dts from the demuxer-tracked ist->dts (AV_TIME_BASE units) */
1824         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1826         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1827     opkt.dts -= ost_tb_start_time;
1829     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1830         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1832             duration = ist->dec_ctx->frame_size;
/* rescale with rounding-error carry to keep audio timestamps monotonic */
1833         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1834                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1835                                                ost->st->time_base) - ost_tb_start_time;
1838     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1839     opkt.flags    = pkt->flags;
1841     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1842     if (  ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1843        && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1844        && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1845        && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
/* parser may rewrite the payload (e.g. add headers); wrap the new buffer */
1847         if (av_parser_change(ost->parser, ost->st->codec,
1848                              &opkt.data, &opkt.size,
1849                              pkt->data, pkt->size,
1850                              pkt->flags & AV_PKT_FLAG_KEY)) {
1851             opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1856         opkt.data = pkt->data;
1857         opkt.size = pkt->size;
1859     av_copy_packet_side_data(&opkt, pkt);
1861     if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1862         /* store AVPicture in AVPacket, as expected by the output format */
1863         avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1864         opkt.data = (uint8_t *)&pict;
1865         opkt.size = sizeof(AVPicture);
1866         opkt.flags |= AV_PKT_FLAG_KEY;
1869     write_frame(of->ctx, &opkt, ost);
/* If the decoder reported no channel layout, pick the default layout for
 * its channel count (unless the count exceeds the user's guess_layout_max)
 * and warn that it was guessed.  Return value lines (0 on failure, 1 on
 * success in upstream FFmpeg) are elided from this view. */
1872 int guess_input_channel_layout(InputStream *ist)
1874     AVCodecContext *dec = ist->dec_ctx;
1876     if (!dec->channel_layout) {
1877         char layout_name[256];
/* too many channels to guess reliably: refuse (branch body elided) */
1879         if (dec->channels > ist->guess_layout_max)
1881         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1882         if (!dec->channel_layout)
1884         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1885                                      dec->channels, dec->channel_layout);
1886         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for  Input Stream "
1887                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet: run avcodec_decode_audio4(), advance the
 * stream's predicted pts/dts by the decoded sample count, reconfigure
 * filtergraphs if the sample format/rate/channels changed mid-stream,
 * choose the best available timestamp for the frame, and push it into
 * every attached filter input.  Returns the decoder's return value, or a
 * negative error from frame ref / buffersrc injection.
 * NOTE(review): several lines (error gotos, sample_rate divisor of the
 * next_pts update, filter-count branches) are elided in this view. */
1892 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1894     AVFrame *decoded_frame, *f;
1895     AVCodecContext *avctx = ist->dec_ctx;
1896     int i, ret, err = 0, resample_changed;
1897     AVRational decoded_frame_tb;
/* lazily allocate the reusable decode and filter-feed frames */
1899     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1900         return AVERROR(ENOMEM);
1901     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1902         return AVERROR(ENOMEM);
1903     decoded_frame = ist->decoded_frame;
1905     update_benchmark(NULL);
1906     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1907     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a frame with an invalid sample rate is unusable downstream */
1909     if (ret >= 0 && avctx->sample_rate <= 0) {
1910         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1911         ret = AVERROR_INVALIDDATA;
/* decode_error_stat[0] counts successes, [1] counts failures */
1914     if (*got_output || ret<0)
1915         decode_error_stat[ret<0] ++;
1917     if (ret < 0 && exit_on_error)
1920     if (!*got_output || ret < 0)
1923     ist->samples_decoded += decoded_frame->nb_samples;
1924     ist->frames_decoded++;
1927     /* increment next_dts to use for the case where the input stream does not
1928        have timestamps or there are multiple frames in the packet */
1929     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1931     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1935     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1936                        ist->resample_channels       != avctx->channels               ||
1937                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1938                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1939     if (resample_changed) {
1940         char layout1[64], layout2[64];
1942             if (!guess_input_channel_layout(ist)) {
1943                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1944                        "layout for  Input Stream #%d.%d\n", ist->file_index,
1948             decoded_frame->channel_layout = avctx->channel_layout;
1950         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1951                                      ist->resample_channel_layout);
1952         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1953                                      decoded_frame->channel_layout);
1955         av_log(NULL, AV_LOG_INFO,
1956                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1957                ist->file_index, ist->st->index,
1958                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
1959                ist->resample_channels, layout1,
1960                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1961                avctx->channels, layout2);
/* remember the new parameters so future changes are detected */
1963         ist->resample_sample_fmt     = decoded_frame->format;
1964         ist->resample_sample_rate    = decoded_frame->sample_rate;
1965         ist->resample_channel_layout = decoded_frame->channel_layout;
1966         ist->resample_channels       = avctx->channels;
/* re-negotiate every filtergraph this input stream feeds */
1968         for (i = 0; i < nb_filtergraphs; i++)
1969             if (ist_in_filtergraph(filtergraphs[i], ist)) {
1970                 FilterGraph *fg = filtergraphs[i];
1971                 if (configure_filtergraph(fg) < 0) {
1972                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1978     /* if the decoder provides a pts, use it instead of the last packet pts.
1979        the decoder could be delaying output by a packet or more. */
1980     if (decoded_frame->pts != AV_NOPTS_VALUE) {
1981         ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1982         decoded_frame_tb   = avctx->time_base;
1983     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1984         decoded_frame->pts = decoded_frame->pkt_pts;
1985         decoded_frame_tb   = ist->st->time_base;
1986     } else if (pkt->pts != AV_NOPTS_VALUE) {
1987         decoded_frame->pts = pkt->pts;
1988         decoded_frame_tb   = ist->st->time_base;
/* last resort: use the demuxer-tracked dts in AV_TIME_BASE units */
1990         decoded_frame->pts = ist->dts;
1991         decoded_frame_tb   = AV_TIME_BASE_Q;
/* consume the packet pts so a multi-frame packet doesn't reuse it */
1993     pkt->pts           = AV_NOPTS_VALUE;
1994     if (decoded_frame->pts != AV_NOPTS_VALUE)
1995         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1996                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1997                                               (AVRational){1, avctx->sample_rate});
1998     for (i = 0; i < ist->nb_filters; i++) {
/* all but the last filter input get a ref; the last one takes the frame */
1999         if (i < ist->nb_filters - 1) {
2000             f = ist->filter_frame;
2001             err = av_frame_ref(f, decoded_frame);
2006         err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2007                                            AV_BUFFERSRC_FLAG_PUSH);
2008         if (err == AVERROR_EOF)
2009             err = 0; /* ignore */
2013     decoded_frame->pts = AV_NOPTS_VALUE;
2015     av_frame_unref(ist->filter_frame);
2016     av_frame_unref(decoded_frame);
2017     return err < 0 ? err : ret;
/* Decode one video packet: run avcodec_decode_video2(), propagate
 * has_b_frames fixups, retrieve hwaccel frames back to system memory,
 * derive the frame timestamp from the best-effort timestamp, reconfigure
 * filtergraphs on a size/format change, and push the frame into every
 * attached filter input.  Returns the decoder result or a negative error
 * from frame ref / buffersrc injection.
 * NOTE(review): fail gotos, debug_ts guards and some closing braces are
 * elided in this view; comments cover visible code only. */
2020 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2022     AVFrame *decoded_frame, *f;
2023     int i, ret = 0, err = 0, resample_changed;
2024     int64_t best_effort_timestamp;
2025     AVRational *frame_sample_aspect;
2027     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2028         return AVERROR(ENOMEM);
2029     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2030         return AVERROR(ENOMEM);
2031     decoded_frame = ist->decoded_frame;
/* feed the demuxer-tracked dts to the decoder for its internal timing */
2032     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2034     update_benchmark(NULL);
2035     ret = avcodec_decode_video2(ist->dec_ctx,
2036                                 decoded_frame, got_output, pkt);
2037     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2039     // The following line may be required in some cases where there is no parser
2040     // or the parser does not has_b_frames correctly
2041     if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2042         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2043             ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2045             av_log_ask_for_sample(
2047                 "has_b_frames is larger in decoder than demuxer %d > %d ",
2048                 ist->dec_ctx->has_b_frames,
2049                 ist->st->codec->has_b_frames
/* decode_error_stat[0] counts successes, [1] counts failures */
2053     if (*got_output || ret<0)
2054         decode_error_stat[ret<0] ++;
2056     if (ret < 0 && exit_on_error)
2059     if (*got_output && ret >= 0) {
/* decoder context and emitted frame disagree on geometry/format: log it */
2060         if (ist->dec_ctx->width  != decoded_frame->width ||
2061             ist->dec_ctx->height != decoded_frame->height ||
2062             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2063             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2064                 decoded_frame->width,
2065                 decoded_frame->height,
2066                 decoded_frame->format,
2067                 ist->dec_ctx->width,
2068                 ist->dec_ctx->height,
2069                 ist->dec_ctx->pix_fmt);
2073     if (!*got_output || ret < 0)
/* honor a user-forced field order (-top) */
2076     if(ist->top_field_first>=0)
2077         decoded_frame->top_field_first = ist->top_field_first;
2079     ist->frames_decoded++;
/* copy a hwaccel surface back into a software frame before filtering */
2081     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2082         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2086     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2088     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2089     if(best_effort_timestamp != AV_NOPTS_VALUE)
2090         ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2093         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2094                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2095                ist->st->index, av_ts2str(decoded_frame->pts),
2096                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2097                best_effort_timestamp,
2098                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2099                decoded_frame->key_frame, decoded_frame->pict_type,
2100                ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides whatever the codec reported */
2105     if (ist->st->sample_aspect_ratio.num)
2106         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2108     resample_changed = ist->resample_width   != decoded_frame->width  ||
2109                        ist->resample_height  != decoded_frame->height ||
2110                        ist->resample_pix_fmt != decoded_frame->format;
2111     if (resample_changed) {
2112         av_log(NULL, AV_LOG_INFO,
2113                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2114                ist->file_index, ist->st->index,
2115                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2116                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
/* remember the new geometry so future changes are detected */
2118         ist->resample_width   = decoded_frame->width;
2119         ist->resample_height  = decoded_frame->height;
2120         ist->resample_pix_fmt = decoded_frame->format;
2122         for (i = 0; i < nb_filtergraphs; i++) {
2123             if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2124                 configure_filtergraph(filtergraphs[i]) < 0) {
2125                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2131     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2132     for (i = 0; i < ist->nb_filters; i++) {
2133         if (!frame_sample_aspect->num)
2134             *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* all but the last filter input get a ref; the last one takes the frame */
2136         if (i < ist->nb_filters - 1) {
2137             f = ist->filter_frame;
2138             err = av_frame_ref(f, decoded_frame);
2143         ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2144         if (ret == AVERROR_EOF) {
2145             ret = 0; /* ignore */
2146         } else if (ret < 0) {
2147             av_log(NULL, AV_LOG_FATAL,
2148                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2154     av_frame_unref(ist->filter_frame);
2155     av_frame_unref(decoded_frame);
2156     return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every subtitle
 * encoder fed by this stream (plus sub2video for filter consumption).
 * With -fix_sub_duration, output is delayed by one subtitle so each
 * event's display time can be clipped at the next event's start.
 * NOTE(review): 'end' declaration and some early-return/brace lines are
 * elided in this view. */
2159 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2161     AVSubtitle subtitle;
2162     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2163                                           &subtitle, got_output, pkt);
/* decode_error_stat[0] counts successes, [1] counts failures */
2165     if (*got_output || ret<0)
2166         decode_error_stat[ret<0] ++;
2168     if (ret < 0 && exit_on_error)
2171     if (ret < 0 || !*got_output) {
2173             sub2video_flush(ist);
2177     if (ist->fix_sub_duration) {
2179         if (ist->prev_sub.got_output) {
/* clip the previous event so it ends when this one starts (ms units) */
2180             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2181                              1000, AV_TIME_BASE);
2182             if (end < ist->prev_sub.subtitle.end_display_time) {
2183                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2184                        "Subtitle duration reduced from %d to %d%s\n",
2185                        ist->prev_sub.subtitle.end_display_time, end,
2186                        end <= 0 ? ", dropping it" : "");
2187                 ist->prev_sub.subtitle.end_display_time = end;
/* emit the buffered previous subtitle and buffer the current one */
2190         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2191         FFSWAP(int,        ret,         ist->prev_sub.ret);
2192         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2200     sub2video_update(ist, &subtitle);
2202     if (!subtitle.num_rects)
2205     ist->frames_decoded++;
2207     for (i = 0; i < nb_output_streams; i++) {
2208         OutputStream *ost = output_streams[i];
2210         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2211             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2214         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2218     avsubtitle_free(&subtitle);
/* Signal EOF to every buffersrc fed by this input stream by pushing a
 * NULL ref/frame.  NOTE(review): the two calls appear to be alternatives
 * selected by an elided preprocessor or runtime condition — confirm
 * against the full source. */
2222 static int send_filter_eof(InputStream *ist)
2225     for (i = 0; i < ist->nb_filters; i++) {
2227         ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2229         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2237 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Top-level per-packet driver: maintain the input stream's pts/dts
 * prediction, run the appropriate decoder (audio/video/subtitle) while
 * data remains — or, for pure stream copy, advance the predicted
 * timestamps from packet metadata — and finally fan the packet out to
 * every stream-copy output it qualifies for.  On EOF (pkt == NULL) the
 * decoders are drained and filter inputs get an EOF marker.
 * NOTE(review): avpkt setup from pkt, several early returns and
 * fail-paths are elided from this view. */
2238 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2244     if (!ist->saw_first_ts) {
/* start dts slightly negative to account for decoder B-frame delay */
2245         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2247         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2248             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2249             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2251         ist->saw_first_ts = 1;
2254     if (ist->next_dts == AV_NOPTS_VALUE)
2255         ist->next_dts = ist->dts;
2256     if (ist->next_pts == AV_NOPTS_VALUE)
2257         ist->next_pts = ist->pts;
2261         av_init_packet(&avpkt);
2269     if (pkt->dts != AV_NOPTS_VALUE) {
2270         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2271         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2272             ist->next_pts = ist->pts = ist->dts;
2275     // while we have more to decode or while the decoder did output something on EOF
2276     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2280         ist->pts = ist->next_pts;
2281         ist->dts = ist->next_dts;
/* multiple frames per packet only makes sense for SUBFRAMES codecs */
2283         if (avpkt.size && avpkt.size != pkt->size &&
2284             !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2285             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2286                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2287             ist->showed_multi_packet_warning = 1;
2290         switch (ist->dec_ctx->codec_type) {
2291         case AVMEDIA_TYPE_AUDIO:
2292             ret = decode_audio    (ist, &avpkt, &got_output);
2294         case AVMEDIA_TYPE_VIDEO:
2295             ret = decode_video    (ist, &avpkt, &got_output);
2296             if (avpkt.duration) {
2297                 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2298             } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* repeat_pict from the parser accounts for pulldown/field repeats */
2299                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2300                 duration = ((int64_t)AV_TIME_BASE *
2301                                 ist->dec_ctx->framerate.den * ticks) /
2302                                 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2306             if(ist->dts != AV_NOPTS_VALUE && duration) {
2307                 ist->next_dts += duration;
2309                 ist->next_dts = AV_NOPTS_VALUE;
2312                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2314         case AVMEDIA_TYPE_SUBTITLE:
2315             ret = transcode_subtitles(ist, &avpkt, &got_output);
2322             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2323                    ist->file_index, ist->st->index, av_err2str(ret));
/* pts consumed by the first decode of a multi-frame packet */
2330         avpkt.pts= AV_NOPTS_VALUE;
2332         // touch data and size only if not EOF
/* audio decoders consume partial packets; others take it whole */
2334             if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2342         if (got_output && !pkt)
2346     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2347     if (!pkt && ist->decoding_needed && !got_output) {
2348         int ret = send_filter_eof(ist);
2350             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2355     /* handle stream copy */
2356     if (!ist->decoding_needed) {
2357         ist->dts = ist->next_dts;
2358         switch (ist->dec_ctx->codec_type) {
2359         case AVMEDIA_TYPE_AUDIO:
/* advance by one codec frame worth of samples */
2360             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2361                              ist->dec_ctx->sample_rate;
2363         case AVMEDIA_TYPE_VIDEO:
2364             if (ist->framerate.num) {
2365                 // TODO: Remove work-around for c99-to-c89 issue 7
2366                 AVRational time_base_q = AV_TIME_BASE_Q;
2367                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2368                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2369             } else if (pkt->duration) {
2370                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2371             } else if(ist->dec_ctx->framerate.num != 0) {
2372                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2373                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2374                                   ist->dec_ctx->framerate.den * ticks) /
2375                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2379         ist->pts = ist->dts;
2380         ist->next_pts = ist->next_dts;
/* fan the packet out to every qualifying stream-copy output */
2382     for (i = 0; pkt && i < nb_output_streams; i++) {
2383         OutputStream *ost = output_streams[i];
2385         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2388         do_streamcopy(ist, ost, pkt);
/*
 * Emit an SDP session description covering every output file that uses
 * the "rtp" muxer.  The contexts of all RTP outputs are gathered into a
 * temporary array, av_sdp_create() renders them into one SDP text, and
 * the result is either printed to stdout (no -sdp_file given) or written
 * to sdp_filename through an avio handle.
 * NOTE(review): this chunk is a partial extraction; several lines of the
 * original body (braces, error paths) are not visible here.
 */
2394 static void print_sdp(void)
2399 AVIOContext *sdp_pb;
2400 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect the contexts of RTP outputs only; j counts how many were found */
2404 for (i = 0, j = 0; i < nb_output_files; i++) {
2405 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2406 avc[j] = output_files[i]->ctx;
2411 av_sdp_create(avc, j, sdp, sizeof(sdp));
2413 if (!sdp_filename) {
2414 printf("SDP:\n%s\n", sdp);
/* -sdp_file was given: write the SDP there instead of stdout */
2417 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2418 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2420 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2421 avio_closep(&sdp_pb);
/* free the filename so the SDP is only written once */
2422 av_freep(&sdp_filename);
/*
 * Look up the hwaccel entry matching a given (hwaccel) pixel format.
 * Linear scan of the global, name-terminated hwaccels[] table; returns
 * the matching entry, or (per the table sentinel) falls through when no
 * entry matches — the NULL-returning tail is not visible in this chunk.
 */
2429 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2432 for (i = 0; hwaccels[i].name; i++)
2433 if (hwaccels[i].pix_fmt == pix_fmt)
2434 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: pick a pixel format from the
 * decoder's candidate list, preferring a hardware-accelerated one that
 * matches the user's -hwaccel selection.  For each hwaccel candidate it
 * tries hwaccel->init(); on failure with an explicitly requested hwaccel
 * it aborts the stream by returning AV_PIX_FMT_NONE.  On success it
 * records the active hwaccel id and pixel format on the InputStream.
 * NOTE(review): partial extraction — the `continue` paths and the final
 * software-format fallback are not visible here.
 */
2438 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2440 InputStream *ist = s->opaque;
2441 const enum AVPixelFormat *p;
/* the candidate list is terminated by -1 (AV_PIX_FMT_NONE) */
2444 for (p = pix_fmts; *p != -1; p++) {
2445 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2446 const HWAccel *hwaccel;
/* only consider hardware-accelerated formats in this loop */
2448 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2451 hwaccel = get_hwaccel(*p);
/* skip if another hwaccel is already active, or a different one was requested */
2453 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2454 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2457 ret = hwaccel->init(s);
/* init failed: fatal only if the user asked for this hwaccel explicitly */
2459 if (ist->hwaccel_id == hwaccel->id) {
2460 av_log(NULL, AV_LOG_FATAL,
2461 "%s hwaccel requested for input stream #%d:%d, "
2462 "but cannot be initialized.\n", hwaccel->name,
2463 ist->file_index, ist->st->index);
2464 return AV_PIX_FMT_NONE;
/* success: remember which hwaccel/pix_fmt is in use for this stream */
2468 ist->active_hwaccel_id = hwaccel->id;
2469 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel's buffer allocator when the frame uses the hwaccel
 * pixel format, otherwise fall back to avcodec's default allocator.
 */
2476 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2478 InputStream *ist = s->opaque;
2480 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2481 return ist->hwaccel_get_buffer(s, frame, flags);
2483 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (if decoding is needed) and
 * reset its timestamp bookkeeping.  Installs the hwaccel-aware
 * get_format/get_buffer callbacks, forces refcounted frames, applies a
 * DVB-subtitle workaround, defaults decoder threading to "auto", and
 * reports open failures through the caller-supplied error buffer.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): partial extraction — some braces/returns are not visible.
 */
2486 static int init_input_stream(int ist_index, char *error, int error_len)
2489 InputStream *ist = input_streams[ist_index];
2491 if (ist->decoding_needed) {
2492 AVCodec *codec = ist->dec;
/* no decoder available for this codec id */
2494 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2495 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2496 return AVERROR(EINVAL);
/* hook up per-stream callbacks; opaque lets callbacks recover the InputStream */
2499 ist->dec_ctx->opaque = ist;
2500 ist->dec_ctx->get_format = get_format;
2501 ist->dec_ctx->get_buffer2 = get_buffer;
2502 ist->dec_ctx->thread_safe_callbacks = 1;
/* decoded frames must be refcounted so they can outlive the decoder call */
2504 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2505 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2506 (ist->decoding_needed & DECODING_FOR_OST)) {
2507 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2508 if (ist->decoding_needed & DECODING_FOR_FILTER)
2509 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* default to automatic thread count unless the user set one */
2512 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2513 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2514 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2515 if (ret == AVERROR_EXPERIMENTAL)
2516 abort_codec_experimental(codec, 0);
2518 snprintf(error, error_len,
2519 "Error while opening decoder for input stream "
2521 ist->file_index, ist->st->index, av_err2str(ret));
/* any options left in the dict were not consumed — treated as an error */
2524 assert_avoptions(ist->decoder_opts);
/* timestamps are unknown until the first packet is seen */
2527 ist->next_pts = AV_NOPTS_VALUE;
2528 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding an output stream, or (in the
 * not-visible tail) NULL when the output has no direct source
 * (e.g. it is fed by a complex filtergraph, source_index < 0).
 */
2533 static InputStream *get_input_stream(OutputStream *ost)
2535 if (ost->source_index >= 0)
2536 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values.  Uses explicit comparisons
 * instead of subtraction so the result cannot overflow for values far
 * apart (e.g. INT64_MIN vs INT64_MAX).
 */
2540 static int compare_int64(const void *a, const void *b)
2542 int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2543 return va < vb ? -1 : va > vb ? +1 : 0;
/*
 * Parse the -force_key_frames argument (a comma-separated list of
 * timestamps, with the special entry "chapters[+offset]" expanding to
 * all chapter start times of the output file) into a sorted array of
 * PTS values in the encoder time base, stored on the OutputStream as
 * forced_kf_pts/forced_kf_count.  Fatal (exit via the not-visible
 * exit_program paths) on allocation failure.
 * NOTE(review): partial extraction — comma-splitting advance logic and
 * some braces are not visible here.
 */
2546 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2547 AVCodecContext *avctx)
2550 int n = 1, i, size, index = 0;
/* first pass: count entries (commas) to size the pts array */
2553 for (p = kf; *p; p++)
2557 pts = av_malloc_array(size, sizeof(*pts));
2559 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* second pass: convert each comma-separated entry to a pts value */
2564 for (i = 0; i < n; i++) {
2565 char *next = strchr(p, ',');
2570 if (!memcmp(p, "chapters", 8)) {
2572 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array: one slot per chapter replaces the single "chapters" entry */
2575 if (avf->nb_chapters > INT_MAX - size ||
2576 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2578 av_log(NULL, AV_LOG_FATAL,
2579 "Could not allocate forced key frames array.\n");
/* optional offset after "chapters", e.g. "chapters+0.5" */
2582 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2583 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2585 for (j = 0; j < avf->nb_chapters; j++) {
2586 AVChapter *c = avf->chapters[j];
2587 av_assert1(index < size);
2588 pts[index++] = av_rescale_q(c->start, c->time_base,
2589 avctx->time_base) + t;
/* plain timestamp entry */
2594 t = parse_time_or_die("force_key_frames", p, 1);
2595 av_assert1(index < size);
2596 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* keyframe times must be sorted for the lookup done during encoding */
2603 av_assert0(index == size);
2604 qsort(pts, size, sizeof(*pts), compare_int64);
2605 ost->forced_kf_count = size;
2606 ost->forced_kf_pts = pts;
/*
 * Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe of the input file.  nb_streams_warn
 * tracks the highest stream index already reported so the warning is
 * not repeated.
 */
2609 static void report_new_stream(int input_index, AVPacket *pkt)
2611 InputFile *file = input_files[input_index];
2612 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream (or an even later one) */
2614 if (pkt->stream_index < file->nb_streams_warn)
2616 av_log(file->ctx, AV_LOG_WARNING,
2617 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2618 av_get_media_type_string(st->codec->codec_type),
2619 input_index, pkt->stream_index,
2620 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2621 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on an output stream to identify the
 * encoder used (e.g. "Lavc57.x libx264").  Respects an existing user-set
 * tag, and when -bitexact is requested (via format fflags or codec
 * flags) writes the version-less "Lavc <name>" form so output is
 * reproducible.
 * NOTE(review): partial extraction — early returns and some braces are
 * not visible here.
 */
2624 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2626 AVDictionaryEntry *e;
2628 uint8_t *encoder_string;
2629 int encoder_string_len;
2630 int format_flags = 0;
2631 int codec_flags = 0;
/* a user-provided "encoder" tag wins */
2633 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags to detect bitexact mode at the format level */
2636 e = av_dict_get(of->opts, "fflags", NULL, 0);
2638 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2641 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* evaluate codec "flags" option likewise */
2643 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2645 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2648 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* room for "LIBAVCODEC_IDENT <name>" plus separator and NUL */
2651 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2652 encoder_string = av_mallocz(encoder_string_len);
2653 if (!encoder_string)
2656 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2657 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2659 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2660 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict */
2661 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2662 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup for the whole transcode run:
 *   - resolve complex-filtergraph outputs to a source input stream,
 *   - initialize framerate emulation start times,
 *   - compute encoding parameters (or copy them for stream copy) for
 *     every output stream, including time base heuristics, side data,
 *     forced keyframes, two-pass log files and dispositions,
 *   - open all encoders and all decoders,
 *   - discard unused programs, write output file headers, and print the
 *     stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this chunk is a sparse extraction; many lines (braces,
 * continues, gotos, some declarations) of the original function are not
 * visible.  Comments below describe only what the visible code shows.
 */
2665 static int transcode_init(void)
2667 int ret = 0, i, j, k;
2668 AVFormatContext *oc;
2671 char error[1024] = {0};
/* bind each complex-filtergraph output to the input stream feeding it,
 * so downstream code can treat it like a normal mapped stream */
2674 for (i = 0; i < nb_filtergraphs; i++) {
2675 FilterGraph *fg = filtergraphs[i];
2676 for (j = 0; j < fg->nb_outputs; j++) {
2677 OutputFilter *ofilter = fg->outputs[j];
2678 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2680 if (fg->nb_inputs != 1)
2682 for (k = nb_input_streams-1; k >= 0 ; k--)
2683 if (fg->inputs[0]->ist == input_streams[k])
2685 ofilter->ost->source_index = k;
2689 /* init framerate emulation */
2690 for (i = 0; i < nb_input_files; i++) {
2691 InputFile *ifile = input_files[i];
2692 if (ifile->rate_emu)
2693 for (j = 0; j < ifile->nb_streams; j++)
2694 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2697 /* output stream init */
2698 for (i = 0; i < nb_output_files; i++) {
2699 oc = output_files[i]->ctx;
2700 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2701 av_dump_format(oc, i, oc->filename, 1);
2702 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2703 return AVERROR(EINVAL);
2707 /* init complex filtergraphs */
2708 for (i = 0; i < nb_filtergraphs; i++)
2709 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2712 /* for each output stream, we compute the right encoding parameters */
2713 for (i = 0; i < nb_output_streams; i++) {
2714 AVCodecContext *enc_ctx;
2715 AVCodecContext *dec_ctx = NULL;
2716 ost = output_streams[i];
2717 oc = output_files[ost->file_index]->ctx;
2718 ist = get_input_stream(ost);
2720 if (ost->attachment_filename)
/* stream copy writes straight into the muxer's codec context */
2723 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2726 dec_ctx = ist->dec_ctx;
2728 ost->st->disposition = ist->st->disposition;
2729 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2730 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its type in the file, mark it default */
2732 for (j=0; j<oc->nb_streams; j++) {
2733 AVStream *st = oc->streams[j];
2734 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2737 if (j == oc->nb_streams)
2738 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2739 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2742 if (ost->stream_copy) {
2744 uint64_t extra_size;
2746 av_assert0(ist && !ost->filter);
/* compute in 64 bits to detect overflow before allocating */
2748 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2750 if (extra_size > INT_MAX) {
2751 return AVERROR(EINVAL);
2754 /* if stream_copy is selected, no need to decode or encode */
2755 enc_ctx->codec_id = dec_ctx->codec_id;
2756 enc_ctx->codec_type = dec_ctx->codec_type;
/* keep the input codec tag when the output container can represent it */
2758 if (!enc_ctx->codec_tag) {
2759 unsigned int codec_tag;
2760 if (!oc->oformat->codec_tag ||
2761 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2762 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2763 enc_ctx->codec_tag = dec_ctx->codec_tag;
2766 enc_ctx->bit_rate = dec_ctx->bit_rate;
2767 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2768 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2769 enc_ctx->field_order = dec_ctx->field_order;
2770 if (dec_ctx->extradata_size) {
2771 enc_ctx->extradata = av_mallocz(extra_size);
2772 if (!enc_ctx->extradata) {
2773 return AVERROR(ENOMEM);
2775 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2777 enc_ctx->extradata_size= dec_ctx->extradata_size;
2778 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2780 enc_ctx->time_base = ist->st->time_base;
2782 * Avi is a special case here because it supports variable fps but
2783 * having the fps and timebase differe significantly adds quite some
/* AVI: pick a time base derived from r_frame_rate when the stream's own
 * time base is implausibly fine-grained (< 1/500 s) */
2786 if(!strcmp(oc->oformat->name, "avi")) {
2787 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2788 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2789 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2790 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2792 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2793 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2794 enc_ctx->ticks_per_frame = 2;
2795 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2796 && av_q2d(ist->st->time_base) < 1.0/500
2798 enc_ctx->time_base = dec_ctx->time_base;
2799 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2800 enc_ctx->time_base.den *= 2;
2801 enc_ctx->ticks_per_frame = 2;
/* non-variable-fps containers other than the mov/mp4 family: prefer the
 * codec time base when the stream time base looks too fine */
2803 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2804 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2805 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2806 && strcmp(oc->oformat->name, "f4v")
2808 if( copy_tb<0 && dec_ctx->time_base.den
2809 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2810 && av_q2d(ist->st->time_base) < 1.0/500
2812 enc_ctx->time_base = dec_ctx->time_base;
2813 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode (tmcd) tracks keep the codec time base when it looks like a
 * sane frame duration (roughly between 1/121 and 1) */
2816 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2817 && dec_ctx->time_base.num < dec_ctx->time_base.den
2818 && dec_ctx->time_base.num > 0
2819 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2820 enc_ctx->time_base = dec_ctx->time_base;
2823 if (ist && !ost->frame_rate.num)
2824 ost->frame_rate = ist->framerate;
2825 if(ost->frame_rate.num)
2826 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2828 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2829 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* deep-copy stream side data, skipping the display matrix if the user
 * overrode rotation on the command line */
2831 if (ist->st->nb_side_data) {
2832 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2833 sizeof(*ist->st->side_data));
2834 if (!ost->st->side_data)
2835 return AVERROR(ENOMEM);
2837 ost->st->nb_side_data = 0;
2838 for (j = 0; j < ist->st->nb_side_data; j++) {
2839 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2840 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2842 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2845 sd_dst->data = av_malloc(sd_src->size);
2847 return AVERROR(ENOMEM);
2848 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2849 sd_dst->size = sd_src->size;
2850 sd_dst->type = sd_src->type;
2851 ost->st->nb_side_data++;
2855 ost->parser = av_parser_init(enc_ctx->codec_id);
2857 switch (enc_ctx->codec_type) {
2858 case AVMEDIA_TYPE_AUDIO:
2859 if (audio_volume != 256) {
2860 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2863 enc_ctx->channel_layout = dec_ctx->channel_layout;
2864 enc_ctx->sample_rate = dec_ctx->sample_rate;
2865 enc_ctx->channels = dec_ctx->channels;
2866 enc_ctx->frame_size = dec_ctx->frame_size;
2867 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2868 enc_ctx->block_align = dec_ctx->block_align;
2869 enc_ctx->initial_padding = dec_ctx->delay;
2870 #if FF_API_AUDIOENC_DELAY
2871 enc_ctx->delay = dec_ctx->delay;
/* these block_align values are bogus for MP3/AC3 streams — clear them */
2873 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2874 enc_ctx->block_align= 0;
2875 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2876 enc_ctx->block_align= 0;
2878 case AVMEDIA_TYPE_VIDEO:
2879 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2880 enc_ctx->width = dec_ctx->width;
2881 enc_ctx->height = dec_ctx->height;
2882 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2883 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2885 av_mul_q(ost->frame_aspect_ratio,
2886 (AVRational){ enc_ctx->height, enc_ctx->width });
2887 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2888 "with stream copy may produce invalid files\n");
2890 else if (ist->st->sample_aspect_ratio.num)
2891 sar = ist->st->sample_aspect_ratio;
2893 sar = dec_ctx->sample_aspect_ratio;
2894 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2895 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2896 ost->st->r_frame_rate = ist->st->r_frame_rate;
2898 case AVMEDIA_TYPE_SUBTITLE:
2899 enc_ctx->width = dec_ctx->width;
2900 enc_ctx->height = dec_ctx->height;
2902 case AVMEDIA_TYPE_UNKNOWN:
2903 case AVMEDIA_TYPE_DATA:
2904 case AVMEDIA_TYPE_ATTACHMENT:
/* --- encoding path (not stream copy) --- */
2911 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2913 /* should only happen when a default codec is not present. */
2914 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2915 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2916 ret = AVERROR(EINVAL);
2921 ist->decoding_needed |= DECODING_FOR_OST;
2922 ost->encoding_needed = 1;
2924 set_encoder_id(output_files[ost->file_index], ost);
/* audio/video encoders are fed through a (possibly trivial) filtergraph */
2927 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2928 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2930 fg = init_simple_filtergraph(ist, ost);
2931 if (configure_filtergraph(fg)) {
2932 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* frame rate selection: user value, filter output, input -r, input
 * r_frame_rate, and finally a 25fps fallback (with a warning) */
2937 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2938 if (!ost->frame_rate.num)
2939 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2940 if (ist && !ost->frame_rate.num)
2941 ost->frame_rate = ist->framerate;
2942 if (ist && !ost->frame_rate.num)
2943 ost->frame_rate = ist->st->r_frame_rate;
2944 if (ist && !ost->frame_rate.num) {
2945 ost->frame_rate = (AVRational){25, 1};
2946 av_log(NULL, AV_LOG_WARNING,
2948 "about the input framerate is available. Falling "
2949 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2950 "if you want a different framerate.\n",
2951 ost->file_index, ost->index);
2953 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the nearest rate the encoder supports, unless -force_fps */
2954 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2955 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2956 ost->frame_rate = ost->enc->supported_framerates[idx];
2958 // reduce frame rate for mpeg4 to be within the spec limits
2959 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2960 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2961 ost->frame_rate.num, ost->frame_rate.den, 65535);
2965 switch (enc_ctx->codec_type) {
2966 case AVMEDIA_TYPE_AUDIO:
/* take audio parameters from the buffersink input link */
2967 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2968 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2969 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2970 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2971 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2973 case AVMEDIA_TYPE_VIDEO:
2974 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2975 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2976 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2977 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2978 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2979 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2980 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* forced keyframe times were parsed in AV_TIME_BASE_Q; rescale them */
2982 for (j = 0; j < ost->forced_kf_count; j++)
2983 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2985 enc_ctx->time_base);
2987 enc_ctx->width = ost->filter->filter->inputs[0]->w;
2988 enc_ctx->height = ost->filter->filter->inputs[0]->h;
2989 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2990 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2991 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2992 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2993 if (!strncmp(ost->enc->name, "libx264", 7) &&
2994 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2995 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
2996 av_log(NULL, AV_LOG_WARNING,
2997 "No pixel format specified, %s for H.264 encoding chosen.\n"
2998 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2999 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3000 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3001 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3002 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3003 av_log(NULL, AV_LOG_WARNING,
3004 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3005 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3006 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3007 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3009 ost->st->avg_frame_rate = ost->frame_rate;
3012 enc_ctx->width != dec_ctx->width ||
3013 enc_ctx->height != dec_ctx->height ||
3014 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3015 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" uses a runtime expression, "source" keeps
 * input keyframes, anything else is a static timestamp list */
3018 if (ost->forced_keyframes) {
3019 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3020 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3021 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3023 av_log(NULL, AV_LOG_ERROR,
3024 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3027 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3028 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3029 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3030 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3032 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3033 // parse it only for static kf timings
3034 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3035 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3039 case AVMEDIA_TYPE_SUBTITLE:
3040 enc_ctx->time_base = (AVRational){1, 1000};
3041 if (!enc_ctx->width) {
3042 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3043 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3046 case AVMEDIA_TYPE_DATA:
/* two-pass encoding: set up the per-stream pass log file */
3053 if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
3054 char logfilename[1024];
3057 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
3058 ost->logfile_prefix ? ost->logfile_prefix :
3059 DEFAULT_PASS_LOGFILENAME_PREFIX,
3061 if (!strcmp(ost->enc->name, "libx264")) {
/* libx264 reads/writes its own stats file via the "stats" option */
3062 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
3064 if (enc_ctx->flags & CODEC_FLAG_PASS2) {
3066 size_t logbuffer_size;
3067 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3068 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3072 enc_ctx->stats_in = logbuffer;
3074 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
3075 f = av_fopen_utf8(logfilename, "wb");
3077 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3078 logfilename, strerror(errno));
/* parse the -disposition string through a local, self-contained AVOption
 * flags table so named flags like "default+forced" work */
3087 if (ost->disposition) {
3088 static const AVOption opts[] = {
3089 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3090 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3091 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3092 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3093 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3094 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3095 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3096 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3097 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3098 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3099 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3100 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3101 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3102 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3105 static const AVClass class = {
3107 .item_name = av_default_item_name,
3109 .version = LIBAVUTIL_VERSION_INT,
3111 const AVClass *pclass = &class;
3113 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3119 /* open each encoder */
3120 for (i = 0; i < nb_output_streams; i++) {
3121 ost = output_streams[i];
3122 if (ost->encoding_needed) {
3123 AVCodec *codec = ost->enc;
3124 AVCodecContext *dec = NULL;
3126 if ((ist = get_input_stream(ost)))
3128 if (dec && dec->subtitle_header) {
3129 /* ASS code assumes this buffer is null terminated so add extra byte. */
3130 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3131 if (!ost->enc_ctx->subtitle_header) {
3132 ret = AVERROR(ENOMEM);
3135 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3136 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3138 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3139 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3140 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3142 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3143 if (ret == AVERROR_EXPERIMENTAL)
3144 abort_codec_experimental(codec, 1);
3145 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3146 ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the buffersink to emit frames
 * of exactly the encoder's frame_size */
3149 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3150 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3151 av_buffersink_set_frame_size(ost->filter->filter,
3152 ost->enc_ctx->frame_size);
3153 assert_avoptions(ost->encoder_opts);
3154 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3155 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3156 " It takes bits/s as argument, not kbits/s\n");
3158 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3160 av_log(NULL, AV_LOG_FATAL,
3161 "Error initializing the output stream codec context.\n");
3165 // copy timebase while removing common factors
3166 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3167 ost->st->codec->codec= ost->enc_ctx->codec;
3169 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3171 av_log(NULL, AV_LOG_FATAL,
3172 "Error setting up codec context options.\n");
3175 // copy timebase while removing common factors
3176 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
3180 /* init input streams */
3181 for (i = 0; i < nb_input_streams; i++)
3182 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close the encoders already opened above */
3183 for (i = 0; i < nb_output_streams; i++) {
3184 ost = output_streams[i];
3185 avcodec_close(ost->enc_ctx);
3190 /* discard unused programs */
3191 for (i = 0; i < nb_input_files; i++) {
3192 InputFile *ifile = input_files[i];
3193 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3194 AVProgram *p = ifile->ctx->programs[j];
3195 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is mapped */
3197 for (k = 0; k < p->nb_stream_indexes; k++)
3198 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3199 discard = AVDISCARD_DEFAULT;
3202 p->discard = discard;
3206 /* open files and write file headers */
3207 for (i = 0; i < nb_output_files; i++) {
3208 oc = output_files[i]->ctx;
3209 oc->interrupt_callback = int_cb;
3210 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3211 snprintf(error, sizeof(error),
3212 "Could not write header for output file #%d "
3213 "(incorrect codec parameters ?): %s",
3214 i, av_err2str(ret));
3215 ret = AVERROR(EINVAL);
3218 // assert_avoptions(output_files[i]->opts);
3219 if (strcmp(oc->oformat->name, "rtp")) {
3225 /* dump the file output parameters - cannot be done before in case
3227 for (i = 0; i < nb_output_files; i++) {
3228 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3231 /* dump the stream mapping */
3232 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3233 for (i = 0; i < nb_input_streams; i++) {
3234 ist = input_streams[i];
3236 for (j = 0; j < ist->nb_filters; j++) {
3237 if (ist->filters[j]->graph->graph_desc) {
3238 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3239 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3240 ist->filters[j]->name);
3241 if (nb_filtergraphs > 1)
3242 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3243 av_log(NULL, AV_LOG_INFO, "\n");
3248 for (i = 0; i < nb_output_streams; i++) {
3249 ost = output_streams[i];
3251 if (ost->attachment_filename) {
3252 /* an attached file */
3253 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3254 ost->attachment_filename, ost->file_index, ost->index);
3258 if (ost->filter && ost->filter->graph->graph_desc) {
3259 /* output from a complex graph */
3260 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3261 if (nb_filtergraphs > 1)
3262 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3264 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3265 ost->index, ost->enc ? ost->enc->name : "?");
3269 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3270 input_streams[ost->source_index]->file_index,
3271 input_streams[ost->source_index]->st->index,
3274 if (ost->sync_ist != input_streams[ost->source_index])
3275 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3276 ost->sync_ist->file_index,
3277 ost->sync_ist->st->index);
3278 if (ost->stream_copy)
3279 av_log(NULL, AV_LOG_INFO, " (copy)");
/* transcoding: show "codec (decoder) -> codec (encoder)", where a
 * decoder/encoder whose name equals the codec name is shown as "native" */
3281 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3282 const AVCodec *out_codec = ost->enc;
3283 const char *decoder_name = "?";
3284 const char *in_codec_name = "?";
3285 const char *encoder_name = "?";
3286 const char *out_codec_name = "?";
3287 const AVCodecDescriptor *desc;
3290 decoder_name = in_codec->name;
3291 desc = avcodec_descriptor_get(in_codec->id);
3293 in_codec_name = desc->name;
3294 if (!strcmp(decoder_name, in_codec_name))
3295 decoder_name = "native";
3299 encoder_name = out_codec->name;
3300 desc = avcodec_descriptor_get(out_codec->id);
3302 out_codec_name = desc->name;
3303 if (!strcmp(encoder_name, out_codec_name))
3304 encoder_name = "native";
3307 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3308 in_codec_name, decoder_name,
3309 out_codec_name, encoder_name);
3311 av_log(NULL, AV_LOG_INFO, "\n");
3315 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3319 if (sdp_filename || want_sdp) {
3323 transcode_init_done = 1;
3328 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3329 static int need_output(void)
3333 for (i = 0; i < nb_output_streams; i++) {
3334 OutputStream *ost = output_streams[i];
3335 OutputFile *of = output_files[ost->file_index];
3336 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* skip streams that are done, or whose file hit -fs (limit_filesize) */
3338 if (ost->finished ||
3339 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file */
3341 if (ost->frame_number >= ost->max_frames) {
3343 for (j = 0; j < of->ctx->nb_streams; j++)
3344 close_output_stream(output_streams[of->ost_index + j]);
3355 * Select the output stream to process.
3357 * @return selected output stream, or NULL if none available
/* Picks the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base) so muxing stays interleaved; an
 * "unavailable" stream yields NULL to make the caller wait. */
3359 static OutputStream *choose_output(void)
3362 int64_t opts_min = INT64_MAX;
3363 OutputStream *ost_min = NULL;
3365 for (i = 0; i < nb_output_streams; i++) {
3366 OutputStream *ost = output_streams[i];
3367 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3369 if (!ost->finished && opts < opts_min) {
3371 ost_min = ost->unavailable ? NULL : ost;
/* Poll the tty for interactive single-key commands.
 * Returns AVERROR_EXIT to request that the transcode loop stop, 0 otherwise. */
3377 static int check_keyboard_interaction(int64_t cur_time)
3380 static int64_t last_time;
/* A pending signal always wins over keyboard input. */
3381 if (received_nb_signals)
3382 return AVERROR_EXIT;
3383 /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100ms; never poll when daemonized. */
3384 if(cur_time - last_time >= 100000 && !run_as_daemon){
3386 last_time = cur_time;
/* NOTE(review): the key test guarding this return is elided here;
 * presumably 'q' (or EOF) triggers termination — confirm in full source. */
3390 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity, 's' toggles the QP histogram. */
3391 if (key == '+') av_log_set_level(av_log_get_level()+10);
3392 if (key == '-') av_log_set_level(av_log_get_level()-10);
3393 if (key == 's') qp_hist ^= 1;
/* 'h' cycles dump states: off -> pkt_dump -> pkt+hex dump -> off. */
3396 do_hex_dump = do_pkt_dump = 0;
3397 } else if(do_pkt_dump){
3401 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a full command line from the tty and send ('c', first
 * matching filter only) or queue ('C', all matching filters) it to the
 * filtergraphs. */
3403 if (key == 'c' || key == 'C'){
3404 char buf[4096], target[64], command[256], arg[256] = {0};
3407 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3409 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
/* Expect at least target, time and command; arg is optional. */
3414 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3415 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3416 target, time, command, arg);
3417 for (i = 0; i < nb_filtergraphs; i++) {
3418 FilterGraph *fg = filtergraphs[i];
3421 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3422 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3423 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3424 } else if (key == 'c') {
3425 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3426 ret = AVERROR_PATCHWELCOME;
/* Queued commands fire later, at the requested stream time. */
3428 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3430 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3435 av_log(NULL, AV_LOG_ERROR,
3436 "Parse error, at least 3 arguments were expected, "
3437 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through codec debug modes; 'D' reads an explicit value. */
3440 if (key == 'd' || key == 'D'){
3443 debug = input_streams[0]->st->codec->debug<<1;
3444 if(!debug) debug = 1;
3445 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3448 if(scanf("%d", &debug)!=1)
3449 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3450 for(i=0;i<nb_input_streams;i++) {
3451 input_streams[i]->st->codec->debug = debug;
3453 for(i=0;i<nb_output_streams;i++) {
3454 OutputStream *ost = output_streams[i];
3455 ost->enc_ctx->debug = debug;
3457 if(debug) av_log_set_level(AV_LOG_DEBUG);
3458 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys, per the elided condition) print this help. */
3461 fprintf(stderr, "key function\n"
3462 "? show this help\n"
3463 "+ increase verbosity\n"
3464 "- decrease verbosity\n"
3465 "c Send command to first matching filter supporting it\n"
3466 "C Send/Que command to all matching filters\n"
3467 "D cycle through available debug modes\n"
3468 "h dump packets/hex press to cycle through the 3 states\n"
3470 "s Show QP histogram\n"
/* Per-input-file demuxer thread (multi-input mode): reads packets with
 * av_read_frame() and forwards them to the main thread through
 * f->in_thread_queue. 'arg' is the InputFile*. */
3477 static void *input_thread(void *arg)
3480 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3485 ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer is transient — retry (sleep elided here). */
3487 if (ret == AVERROR(EAGAIN)) {
/* Any other error (incl. EOF) is propagated to the receiving side. */
3492 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate packet data so it outlives the demuxer's internal buffers
 * while sitting in the queue. */
3495 av_dup_packet(&pkt);
3496 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: warn once, then fall back to a
 * blocking send so the packet is not lost. */
3497 if (flags && ret == AVERROR(EAGAIN)) {
3499 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3500 av_log(f->ctx, AV_LOG_WARNING,
3501 "Thread message queue blocking; consider raising the "
3502 "thread_queue_size option (current value: %d)\n",
3503 f->thread_queue_size);
3506 if (ret != AVERROR_EOF)
3507 av_log(f->ctx, AV_LOG_ERROR,
3508 "Unable to send packet to main thread: %s\n",
/* Send failed: free our copy and shut the queue down for the reader. */
3510 av_free_packet(&pkt);
3511 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads and free their packet queues.
 * Safe to call when threading was never started (in_thread_queue NULL). */
3519 static void free_input_threads(void)
3523 for (i = 0; i < nb_input_files; i++) {
3524 InputFile *f = input_files[i];
3527 if (!f->in_thread_queue)
/* Tell the sender side to stop, then drain any queued packets so the
 * thread's blocking send (if any) can complete before the join. */
3529 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3530 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3531 av_free_packet(&pkt);
3533 pthread_join(f->thread, NULL);
3535 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file. Only used with more than one
 * input; a single input is read directly on the main thread.
 * Returns 0 on success, a negative AVERROR on failure. */
3539 static int init_input_threads(void)
3543 if (nb_input_files == 1)
3546 for (i = 0; i < nb_input_files; i++) {
3547 InputFile *f = input_files[i];
/* Non-seekable (live) inputs, and lavfi, are read in non-blocking
 * mode so one stalled input cannot starve the others. */
3549 if (f->ctx->pb ? !f->ctx->pb->seekable :
3550 strcmp(f->ctx->iformat->name, "lavfi"))
3551 f->non_blocking = 1;
3552 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3553 f->thread_queue_size, sizeof(AVPacket));
3557 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3558 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3559 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, not a negative AVERROR. */
3560 return AVERROR(ret);
/* Fetch the next packet produced by this file's demuxer thread.
 * Non-blocking when the file is flagged non_blocking (elided condition),
 * so it may return AVERROR(EAGAIN). */
3566 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3568 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3570 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file, honoring -re style rate
 * limiting: with rate emulation enabled (guard elided here), a stream
 * that is ahead of wall-clock time yields EAGAIN instead of a packet. */
3574 static int get_input_packet(InputFile *f, AVPacket *pkt)
3578 for (i = 0; i < f->nb_streams; i++) {
3579 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream dts (converted to microseconds) against elapsed
 * wall-clock time since the stream started. */
3580 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3581 int64_t now = av_gettime_relative() - ist->start;
3583 return AVERROR(EAGAIN);
/* Threaded path only exists with multiple inputs (see init_input_threads). */
3588 if (nb_input_files > 1)
3589 return get_input_packet_mt(f, pkt);
3591 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. some input recently reported EAGAIN. */
3594 static int got_eagain(void)
3597 for (i = 0; i < nb_output_streams; i++)
3598 if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN state on input files and output streams so
 * the next transcode_step() retries every source. */
3603 static void reset_eagain(void)
3606 for (i = 0; i < nb_input_files; i++)
3607 input_files[i]->eagain = 0;
3608 for (i = 0; i < nb_output_streams; i++)
3609 output_streams[i]->unavailable = 0;
3614 * - 0 -- one packet was read and processed
3615 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3616 * this function should be called again
3617 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from input file 'file_index', fix up its timestamps,
 * and feed it to process_input_packet(). See the return-value contract
 * in the comment block above (0 / EAGAIN / EOF). */
3619 static int process_input(int file_index)
3621 InputFile *ifile = input_files[file_index];
3622 AVFormatContext *is;
3628 ret = get_input_packet(ifile, &pkt);
3630 if (ret == AVERROR(EAGAIN)) {
3635 if (ret != AVERROR_EOF) {
3636 print_error(is->filename, ret);
/* EOF on this file: flush every decoder that was in use... */
3641 for (i = 0; i < ifile->nb_streams; i++) {
3642 ist = input_streams[ifile->ist_index + i];
3643 if (ist->decoding_needed) {
3644 ret = process_input_packet(ist, NULL);
3649 /* mark all outputs that don't go through lavfi as finished */
3650 for (j = 0; j < nb_output_streams; j++) {
3651 OutputStream *ost = output_streams[j];
3653 if (ost->source_index == ifile->ist_index + i &&
3654 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3655 finish_output_stream(ost);
/* ...then report EAGAIN so other inputs keep being processed. */
3659 ifile->eof_reached = 1;
3660 return AVERROR(EAGAIN);
3666 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3667 is->streams[pkt.stream_index]);
3669 /* the following test is needed in case new streams appear
3670 dynamically in stream : we ignore them */
3671 if (pkt.stream_index >= ifile->nb_streams) {
3672 report_new_stream(file_index, &pkt);
3673 goto discard_packet;
3676 ist = input_streams[ifile->ist_index + pkt.stream_index];
3678 ist->data_size += pkt.size;
3682 goto discard_packet;
/* Debug trace of raw demuxer timestamps before any correction. */
3685 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3686 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3687 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3688 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3689 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3690 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3691 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3692 av_ts2str(input_files[ist->file_index]->ts_offset),
3693 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* --- Timestamp wrap correction (pts_wrap_bits < 64 only) ------------- */
3696 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3697 int64_t stime, stime2;
3698 // Correcting starttime based on the enabled streams
3699 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3700 // so we instead do it here as part of discontinuity handling
3701 if ( ist->next_dts == AV_NOPTS_VALUE
3702 && ifile->ts_offset == -is->start_time
3703 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3704 int64_t new_start_time = INT64_MAX;
3705 for (i=0; i<is->nb_streams; i++) {
3706 AVStream *st = is->streams[i];
3707 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3709 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3711 if (new_start_time > is->start_time) {
3712 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3713 ifile->ts_offset = -new_start_time;
3717 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3718 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3719 ist->wrap_correction_done = 1;
/* Timestamps more than half a wrap period past start are assumed to
 * have wrapped; unwrap them and keep watching for further wraps. */
3721 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3722 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3723 ist->wrap_correction_done = 0;
3725 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3726 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3727 ist->wrap_correction_done = 0;
3731 /* add the stream-global side data to the first packet */
3732 if (ist->nb_packets == 1) {
3733 if (ist->st->nb_side_data)
3734 av_packet_split_side_data(&pkt);
3735 for (i = 0; i < ist->st->nb_side_data; i++) {
3736 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't overwrite side data the packet already carries. */
3739 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is consumed by the autorotate filter instead. */
3741 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3744 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3748 memcpy(dst_data, src_sd->data, src_sd->size);
/* --- Apply per-file ts offset, then per-stream -itsscale ------------- */
3752 if (pkt.dts != AV_NOPTS_VALUE)
3753 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3754 if (pkt.pts != AV_NOPTS_VALUE)
3755 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3757 if (pkt.pts != AV_NOPTS_VALUE)
3758 pkt.pts *= ist->ts_scale;
3759 if (pkt.dts != AV_NOPTS_VALUE)
3760 pkt.dts *= ist->ts_scale;
/* --- Inter-file discontinuity: first dts of a stream vs. last_ts ----- */
3762 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3763 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3764 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3765 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3766 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3767 int64_t delta = pkt_dts - ifile->last_ts;
3768 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3769 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3770 ifile->ts_offset -= delta;
3771 av_log(NULL, AV_LOG_DEBUG,
3772 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3773 delta, ifile->ts_offset);
3774 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3775 if (pkt.pts != AV_NOPTS_VALUE)
3776 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* --- Intra-stream discontinuity: dts vs. predicted next_dts ---------- */
3780 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3781 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3782 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3784 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3785 int64_t delta = pkt_dts - ist->next_dts;
/* Formats flagged TS_DISCONT (e.g. MPEG-TS) get the offset adjusted;
 * others merely drop wildly-off timestamps below. */
3786 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3787 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3788 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3789 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3790 ifile->ts_offset -= delta;
3791 av_log(NULL, AV_LOG_DEBUG,
3792 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3793 delta, ifile->ts_offset);
3794 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3795 if (pkt.pts != AV_NOPTS_VALUE)
3796 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Timestamps beyond dts_error_threshold are treated as invalid. */
3799 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3800 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3801 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3802 pkt.dts = AV_NOPTS_VALUE;
3804 if (pkt.pts != AV_NOPTS_VALUE){
3805 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3806 delta = pkt_pts - ist->next_dts;
3807 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3808 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3809 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3810 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last seen dts for the inter-file check above. */
3816 if (pkt.dts != AV_NOPTS_VALUE)
3817 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug trace of the corrected timestamps actually handed on. */
3820 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3821 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3822 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3823 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3824 av_ts2str(input_files[ist->file_index]->ts_offset),
3825 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3828 sub2video_heartbeat(ist, pkt.pts);
3830 process_input_packet(ist, &pkt);
3833 av_free_packet(&pkt);
3839 * Perform a step of transcoding for the specified filter graph.
3841 * @param[in] graph filter graph to consider
3842 * @param[out] best_ist input stream where a frame would allow to continue
3843 * @return 0 for success, <0 for error
/* Pull as much as possible out of 'graph'; on EAGAIN, report via
 * *best_ist the input stream whose buffersrc has failed the most
 * requests, i.e. the input most worth feeding next.
 * Returns 0 for success, <0 for error (see comment block above). */
3845 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3848 int nb_requests, nb_requests_max = 0;
3849 InputFilter *ifilter;
3853 ret = avfilter_graph_request_oldest(graph->graph);
3855 return reap_filters(0);
/* Graph fully flushed: drain outputs once more, then close them all. */
3857 if (ret == AVERROR_EOF) {
3858 ret = reap_filters(1);
3859 for (i = 0; i < graph->nb_outputs; i++)
3860 close_output_stream(graph->outputs[i]->ost);
3863 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the starving input with the most failed requests,
 * skipping files that are themselves blocked or exhausted. */
3866 for (i = 0; i < graph->nb_inputs; i++) {
3867 ifilter = graph->inputs[i];
3869 if (input_files[ist->file_index]->eagain ||
3870 input_files[ist->file_index]->eof_reached)
3872 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3873 if (nb_requests > nb_requests_max) {
3874 nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable so
 * choose_output() skips them this round. */
3880 for (i = 0; i < graph->nb_outputs; i++)
3881 graph->outputs[i]->ost->unavailable = 1;
3887 * Run a single step of transcoding.
3889 * @return 0 for success, <0 for error
/* Run a single step of transcoding: pick an output stream, find the
 * input it needs, read/process one packet, reap filtered frames.
 * Returns 0 for success, <0 for error. */
3891 static int transcode_step(void)
3897 ost = choose_output();
/* No selectable output and nothing pending: we are done. */
3904 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered streams ask the graph which input stream to feed. */
3909 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered streams read straight from their source stream. */
3914 av_assert0(ost->source_index >= 0);
3915 ist = input_streams[ost->source_index];
3918 ret = process_input(ist->file_index);
3919 if (ret == AVERROR(EAGAIN)) {
3920 if (input_files[ist->file_index]->eagain)
3921 ost->unavailable = 1;
/* EOF of one input is not an error for the step as a whole. */
3926 return ret == AVERROR_EOF ? 0 : ret;
3928 return reap_filters(0);
3932 * The following code is the main loop of the file converter
/* Main conversion loop: initialize, iterate transcode_step() until done
 * or interrupted, then flush decoders, write trailers and clean up. */
3934 static int transcode(void)
3937 AVFormatContext *os;
3940 int64_t timer_start;
3942 ret = transcode_init();
3946 if (stdin_interaction) {
3947 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3950 timer_start = av_gettime_relative();
/* Threaded demuxing (no-op with a single input file). */
3953 if ((ret = init_input_threads()) < 0)
3957 while (!received_sigterm) {
3958 int64_t cur_time= av_gettime_relative();
3960 /* if 'q' pressed, exits */
3961 if (stdin_interaction)
3962 if (check_keyboard_interaction(cur_time) < 0)
3965 /* check if there's any stream where output is still needed */
3966 if (!need_output()) {
3967 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3971 ret = transcode_step();
/* EOF/EAGAIN from a step are handled inside the loop, not fatal. */
3973 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3977 av_strerror(ret, errbuf, sizeof(errbuf));
3979 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3984 /* dump report by using the output first video and audio streams */
3985 print_report(0, timer_start, cur_time);
3988 free_input_threads();
3991 /* at the end of stream, we must flush the decoder buffers */
3992 for (i = 0; i < nb_input_streams; i++) {
3993 ist = input_streams[i];
3994 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3995 process_input_packet(ist, NULL);
4002 /* write the trailer if needed and close file */
4003 for (i = 0; i < nb_output_files; i++) {
4004 os = output_files[i]->ctx;
4005 av_write_trailer(os);
4008 /* dump report by using the first video and audio streams */
4009 print_report(1, timer_start, av_gettime_relative());
4011 /* close each encoder */
4012 for (i = 0; i < nb_output_streams; i++) {
4013 ost = output_streams[i];
4014 if (ost->encoding_needed) {
4015 av_freep(&ost->enc_ctx->stats_in);
4019 /* close each decoder */
4020 for (i = 0; i < nb_input_streams; i++) {
4021 ist = input_streams[i];
4022 if (ist->decoding_needed) {
4023 avcodec_close(ist->dec_ctx);
4024 if (ist->hwaccel_uninit)
4025 ist->hwaccel_uninit(ist->dec_ctx);
/* Error path also lands here (label elided): join threads again —
 * free_input_threads() is idempotent — and release per-stream state. */
4034 free_input_threads();
4037 if (output_streams) {
4038 for (i = 0; i < nb_output_streams; i++) {
4039 ost = output_streams[i];
4042 fclose(ost->logfile);
4043 ost->logfile = NULL;
4045 av_freep(&ost->forced_kf_pts);
4046 av_freep(&ost->apad);
4047 av_freep(&ost->disposition);
4048 av_dict_free(&ost->encoder_opts);
4049 av_dict_free(&ost->swr_opts);
4050 av_dict_free(&ost->resample_opts);
4051 av_dict_free(&ost->bsf_args);
/* Return the process's user CPU time in microseconds.
 * Falls back to wall-clock time on platforms with neither getrusage()
 * nor GetProcessTimes(). Used for the -benchmark report. */
4059 static int64_t getutime(void)
4062 struct rusage rusage;
4064 getrusage(RUSAGE_SELF, &rusage);
4065 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4066 #elif HAVE_GETPROCESSTIMES
4068 FILETIME c, e, k, u;
4069 proc = GetCurrentProcess();
4070 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100ns units; divide by 10 to get microseconds. */
4071 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4073 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (0 when no platform
 * API is available — fallback branch elided here). */
4077 static int64_t getmaxrss(void)
4079 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4080 struct rusage rusage;
4081 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on this path. */
4082 return (int64_t)rusage.ru_maxrss * 1024;
4083 #elif HAVE_GETPROCESSMEMORYINFO
4085 PROCESS_MEMORY_COUNTERS memcounters;
4086 proc = GetCurrentProcess();
4087 memcounters.cb = sizeof(memcounters);
4088 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4089 return memcounters.PeakPagefileUsage;
4095 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse the command line,
 * run transcode() and report benchmarks/decode-error statistics. */
4099 int main(int argc, char **argv)
4104 register_exit(ffmpeg_cleanup);
4106 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4108 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4109 parse_loglevel(argc, argv, options);
/* Undocumented '-d': daemon mode — silence the default logger. */
4111 if(argc>1 && !strcmp(argv[1], "-d")){
4113 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters, (de)muxers and protocols. */
4118 avcodec_register_all();
4120 avdevice_register_all();
4122 avfilter_register_all();
4124 avformat_network_init();
4126 show_banner(argc, argv, options);
4130 /* parse options and open all input/output files */
4131 ret = ffmpeg_parse_options(argc, argv);
4135 if (nb_output_files <= 0 && nb_input_files == 0) {
4137 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4141 /* file converter / grab */
4142 if (nb_output_files <= 0) {
4143 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4147 //     if (nb_input_files == 0) {
4148 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4152 current_time = ti = getutime();
4153 if (transcode() < 0)
4155 ti = getutime() - ti;
4157 printf("bench: utime=%0.3fs\n", ti / 1000000.0);
4159 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4160 decode_error_stat[0], decode_error_stat[1]);
/* Fail (via the elided exit call) when the observed decode-error rate
 * exceeds -max_error_rate. */
4161 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by an external signal (q/SIGTERM etc.). */
4164 exit_program(received_nb_signals ? 255 : main_return_code);
4165 return main_return_code;