2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avcodec.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
115 static FILE *vstats_file;
117 const char *const forced_keyframes_const_names[] = {
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
202 dst += r->y * dst_linesize + r->x * 4;
203 src = r->pict.data[0];
204 pal = (uint32_t *)r->pict.data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->pict.linesize[0];
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376 istty = isatty(0) && isatty(2);
378 if (istty && tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > transcode_init_done;
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 av_freep(&fg->inputs[j]->name);
480 av_freep(&fg->inputs[j]);
482 av_freep(&fg->inputs);
483 for (j = 0; j < fg->nb_outputs; j++) {
484 av_freep(&fg->outputs[j]->name);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
512 AVBitStreamFilterContext *bsfc;
517 bsfc = ost->bitstream_filters;
519 AVBitStreamFilterContext *next = bsfc->next;
520 av_bitstream_filter_close(bsfc);
523 ost->bitstream_filters = NULL;
524 av_frame_free(&ost->filtered_frame);
525 av_frame_free(&ost->last_frame);
527 av_parser_close(ost->parser);
529 av_freep(&ost->forced_keyframes);
530 av_expr_free(ost->forced_keyframes_pexpr);
531 av_freep(&ost->avfilter);
532 av_freep(&ost->logfile_prefix);
534 av_freep(&ost->audio_channels_map);
535 ost->audio_channels_mapped = 0;
537 avcodec_free_context(&ost->enc_ctx);
539 av_freep(&output_streams[i]);
542 free_input_threads();
544 for (i = 0; i < nb_input_files; i++) {
545 avformat_close_input(&input_files[i]->ctx);
546 av_freep(&input_files[i]);
548 for (i = 0; i < nb_input_streams; i++) {
549 InputStream *ist = input_streams[i];
551 av_frame_free(&ist->decoded_frame);
552 av_frame_free(&ist->filter_frame);
553 av_dict_free(&ist->decoder_opts);
554 avsubtitle_free(&ist->prev_sub.subtitle);
555 av_frame_free(&ist->sub2video.frame);
556 av_freep(&ist->filters);
557 av_freep(&ist->hwaccel_device);
559 avcodec_free_context(&ist->dec_ctx);
561 av_freep(&input_streams[i]);
566 av_freep(&vstats_filename);
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
605 static void abort_codec_experimental(AVCodec *c, int encoder)
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
639 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
642 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
643 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
644 if (ost->st->codec->extradata) {
645 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
646 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
650 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
651 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
652 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
655 * Audio encoders may split the packets -- #frames in != #packets out.
656 * But there is no reordering, so we can limit the number of output packets
657 * by simply dropping them here.
658 * Counting encoded video frames needs to be done separately because of
659 * reordering, see do_video_out()
661 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662 if (ost->frame_number >= ost->max_frames) {
668 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
670 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
672 ost->quality = sd ? AV_RL32(sd) : -1;
673 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
675 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
677 ost->error[i] = AV_RL64(sd + 8 + 8*i);
684 av_packet_split_side_data(pkt);
687 AVPacket new_pkt = *pkt;
688 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
691 int a = av_bitstream_filter_filter(bsfc, avctx,
692 bsf_arg ? bsf_arg->value : NULL,
693 &new_pkt.data, &new_pkt.size,
694 pkt->data, pkt->size,
695 pkt->flags & AV_PKT_FLAG_KEY);
696 FF_DISABLE_DEPRECATION_WARNINGS
697 if(a == 0 && new_pkt.data != pkt->data
698 #if FF_API_DESTRUCT_PACKET
702 FF_ENABLE_DEPRECATION_WARNINGS
703 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
705 memcpy(t, new_pkt.data, new_pkt.size);
706 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
714 pkt->side_data = NULL;
715 pkt->side_data_elems = 0;
717 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
718 av_buffer_default_free, NULL, 0);
723 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
724 bsfc->filter->name, pkt->stream_index,
725 avctx->codec ? avctx->codec->name : "copy");
735 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
736 if (pkt->dts != AV_NOPTS_VALUE &&
737 pkt->pts != AV_NOPTS_VALUE &&
738 pkt->dts > pkt->pts) {
739 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
741 ost->file_index, ost->st->index);
743 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
744 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
745 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
748 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
749 pkt->dts != AV_NOPTS_VALUE &&
750 ost->last_mux_dts != AV_NOPTS_VALUE) {
751 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
752 if (pkt->dts < max) {
753 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
754 av_log(s, loglevel, "Non-monotonous DTS in output stream "
755 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
756 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
758 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
761 av_log(s, loglevel, "changing to %"PRId64". This may result "
762 "in incorrect timestamps in the output file.\n",
764 if(pkt->pts >= pkt->dts)
765 pkt->pts = FFMAX(pkt->pts, max);
770 ost->last_mux_dts = pkt->dts;
772 ost->data_size += pkt->size;
773 ost->packets_written++;
775 pkt->stream_index = ost->index;
778 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
779 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
780 av_get_media_type_string(ost->enc_ctx->codec_type),
781 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
782 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
787 ret = av_interleaved_write_frame(s, pkt);
789 print_error("av_interleaved_write_frame()", ret);
790 main_return_code = 1;
791 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
796 static void close_output_stream(OutputStream *ost)
798 OutputFile *of = output_files[ost->file_index];
800 ost->finished |= ENCODER_FINISHED;
802 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
803 of->recording_time = FFMIN(of->recording_time, end);
807 static int check_recording_time(OutputStream *ost)
809 OutputFile *of = output_files[ost->file_index];
811 if (of->recording_time != INT64_MAX &&
812 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
813 AV_TIME_BASE_Q) >= 0) {
814 close_output_stream(ost);
820 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
823 AVCodecContext *enc = ost->enc_ctx;
827 av_init_packet(&pkt);
831 if (!check_recording_time(ost))
834 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
835 frame->pts = ost->sync_opts;
836 ost->sync_opts = frame->pts + frame->nb_samples;
837 ost->samples_encoded += frame->nb_samples;
838 ost->frames_encoded++;
840 av_assert0(pkt.size || !pkt.data);
841 update_benchmark(NULL);
843 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
844 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
845 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
846 enc->time_base.num, enc->time_base.den);
849 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
850 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
853 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
856 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
859 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
860 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
861 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
862 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
865 write_frame(s, &pkt, ost);
869 static void do_subtitle_out(AVFormatContext *s,
874 int subtitle_out_max_size = 1024 * 1024;
875 int subtitle_out_size, nb, i;
880 if (sub->pts == AV_NOPTS_VALUE) {
881 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
890 subtitle_out = av_malloc(subtitle_out_max_size);
892 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
897 /* Note: DVB subtitle need one packet to draw them and one other
898 packet to clear them */
899 /* XXX: signal it in the codec context ? */
900 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
905 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
907 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
908 pts -= output_files[ost->file_index]->start_time;
909 for (i = 0; i < nb; i++) {
910 unsigned save_num_rects = sub->num_rects;
912 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
913 if (!check_recording_time(ost))
917 // start_display_time is required to be 0
918 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
919 sub->end_display_time -= sub->start_display_time;
920 sub->start_display_time = 0;
924 ost->frames_encoded++;
926 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
927 subtitle_out_max_size, sub);
929 sub->num_rects = save_num_rects;
930 if (subtitle_out_size < 0) {
931 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
935 av_init_packet(&pkt);
936 pkt.data = subtitle_out;
937 pkt.size = subtitle_out_size;
938 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
939 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
940 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
941 /* XXX: the pts correction is handled here. Maybe handling
942 it in the codec would be better */
944 pkt.pts += 90 * sub->start_display_time;
946 pkt.pts += 90 * sub->end_display_time;
949 write_frame(s, &pkt, ost);
953 static void do_video_out(AVFormatContext *s,
955 AVFrame *next_picture,
958 int ret, format_video_sync;
960 AVCodecContext *enc = ost->enc_ctx;
961 AVCodecContext *mux_enc = ost->st->codec;
962 int nb_frames, nb0_frames, i;
963 double delta, delta0;
966 InputStream *ist = NULL;
967 AVFilterContext *filter = ost->filter->filter;
969 if (ost->source_index >= 0)
970 ist = input_streams[ost->source_index];
972 if (filter->inputs[0]->frame_rate.num > 0 &&
973 filter->inputs[0]->frame_rate.den > 0)
974 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
976 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
977 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
979 if (!ost->filters_script &&
983 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
984 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
989 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
990 ost->last_nb0_frames[1],
991 ost->last_nb0_frames[2]);
993 delta0 = sync_ipts - ost->sync_opts;
994 delta = delta0 + duration;
996 /* by default, we output a single frame */
1000 format_video_sync = video_sync_method;
1001 if (format_video_sync == VSYNC_AUTO) {
1002 if(!strcmp(s->oformat->name, "avi")) {
1003 format_video_sync = VSYNC_VFR;
1005 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1007 && format_video_sync == VSYNC_CFR
1008 && input_files[ist->file_index]->ctx->nb_streams == 1
1009 && input_files[ist->file_index]->input_ts_offset == 0) {
1010 format_video_sync = VSYNC_VSCFR;
1012 if (format_video_sync == VSYNC_CFR && copy_ts) {
1013 format_video_sync = VSYNC_VSCFR;
1019 format_video_sync != VSYNC_PASSTHROUGH &&
1020 format_video_sync != VSYNC_DROP) {
1021 double cor = FFMIN(-delta0, duration);
1022 if (delta0 < -0.6) {
1023 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1025 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1031 switch (format_video_sync) {
1033 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1034 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1037 ost->sync_opts = lrint(sync_ipts);
1040 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1041 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1043 } else if (delta < -1.1)
1045 else if (delta > 1.1) {
1046 nb_frames = lrintf(delta);
1048 nb0_frames = lrintf(delta0 - 0.6);
1054 else if (delta > 0.6)
1055 ost->sync_opts = lrint(sync_ipts);
1058 case VSYNC_PASSTHROUGH:
1059 ost->sync_opts = lrint(sync_ipts);
1066 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1067 nb0_frames = FFMIN(nb0_frames, nb_frames);
1069 memmove(ost->last_nb0_frames + 1,
1070 ost->last_nb0_frames,
1071 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1072 ost->last_nb0_frames[0] = nb0_frames;
1074 if (nb0_frames == 0 && ost->last_droped) {
1076 av_log(NULL, AV_LOG_VERBOSE,
1077 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1078 ost->frame_number, ost->st->index, ost->last_frame->pts);
1080 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1081 if (nb_frames > dts_error_threshold * 30) {
1082 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1086 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1087 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1089 ost->last_droped = nb_frames == nb0_frames && next_picture;
1091 /* duplicates frame if needed */
1092 for (i = 0; i < nb_frames; i++) {
1093 AVFrame *in_picture;
1094 av_init_packet(&pkt);
1098 if (i < nb0_frames && ost->last_frame) {
1099 in_picture = ost->last_frame;
1101 in_picture = next_picture;
1106 in_picture->pts = ost->sync_opts;
1109 if (!check_recording_time(ost))
1111 if (ost->frame_number >= ost->max_frames)
1115 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1116 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1117 /* raw pictures are written as AVPicture structure to
1118 avoid any copies. We support temporarily the older
1120 if (in_picture->interlaced_frame)
1121 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1123 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124 pkt.data = (uint8_t *)in_picture;
1125 pkt.size = sizeof(AVPicture);
1126 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1127 pkt.flags |= AV_PKT_FLAG_KEY;
1129 write_frame(s, &pkt, ost);
1131 int got_packet, forced_keyframe = 0;
1134 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1135 ost->top_field_first >= 0)
1136 in_picture->top_field_first = !!ost->top_field_first;
1138 if (in_picture->interlaced_frame) {
1139 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1140 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1142 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1144 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1146 in_picture->quality = enc->global_quality;
1147 in_picture->pict_type = 0;
1149 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1150 in_picture->pts * av_q2d(enc->time_base) : NAN;
1151 if (ost->forced_kf_index < ost->forced_kf_count &&
1152 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1153 ost->forced_kf_index++;
1154 forced_keyframe = 1;
1155 } else if (ost->forced_keyframes_pexpr) {
1157 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1158 res = av_expr_eval(ost->forced_keyframes_pexpr,
1159 ost->forced_keyframes_expr_const_values, NULL);
1160 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1161 ost->forced_keyframes_expr_const_values[FKF_N],
1162 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1163 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1164 ost->forced_keyframes_expr_const_values[FKF_T],
1165 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1168 forced_keyframe = 1;
1169 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1170 ost->forced_keyframes_expr_const_values[FKF_N];
1171 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1172 ost->forced_keyframes_expr_const_values[FKF_T];
1173 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1176 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1177 } else if ( ost->forced_keyframes
1178 && !strncmp(ost->forced_keyframes, "source", 6)
1179 && in_picture->key_frame==1) {
1180 forced_keyframe = 1;
1183 if (forced_keyframe) {
1184 in_picture->pict_type = AV_PICTURE_TYPE_I;
1185 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1188 update_benchmark(NULL);
1190 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1191 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1192 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1193 enc->time_base.num, enc->time_base.den);
1196 ost->frames_encoded++;
1198 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1199 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1201 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1207 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1208 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1209 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1210 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1213 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1214 pkt.pts = ost->sync_opts;
1216 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1219 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1220 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1221 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1222 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1225 frame_size = pkt.size;
1226 write_frame(s, &pkt, ost);
1228 /* if two pass, output log */
1229 if (ost->logfile && enc->stats_out) {
1230 fprintf(ost->logfile, "%s", enc->stats_out);
1236 * For video, number of frames in == number of packets out.
1237 * But there may be reordering, so we can't throw away frames on encoder
1238 * flush, we need to limit them here, before they go into encoder.
1240 ost->frame_number++;
1242 if (vstats_filename && frame_size)
1243 do_video_stats(ost, frame_size);
1246 if (!ost->last_frame)
1247 ost->last_frame = av_frame_alloc();
1248 av_frame_unref(ost->last_frame);
1249 if (next_picture && ost->last_frame)
1250 av_frame_ref(ost->last_frame, next_picture);
1252 av_frame_free(&ost->last_frame);
1255 static double psnr(double d)
1257 return -10.0 * log(d) / log(10.0);
1260 static void do_video_stats(OutputStream *ost, int frame_size)
1262 AVCodecContext *enc;
1264 double ti1, bitrate, avg_bitrate;
1266 /* this is executed just the first time do_video_stats is called */
1268 vstats_file = fopen(vstats_filename, "w");
1276 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1277 frame_number = ost->st->nb_frames;
1278 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1279 ost->quality / (float)FF_QP2LAMBDA);
1281 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1282 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1284 fprintf(vstats_file,"f_size= %6d ", frame_size);
1285 /* compute pts value */
1286 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1290 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1291 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1292 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1293 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1294 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1298 static void finish_output_stream(OutputStream *ost)
1300 OutputFile *of = output_files[ost->file_index];
1303 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1306 for (i = 0; i < of->ctx->nb_streams; i++)
1307 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every output stream's buffersink and feed the frames to the
 * audio/video encoders. Non-blocking: uses AV_BUFFERSINK_FLAG_NO_REQUEST
 * so only already-available frames are reaped.
 * NOTE(review): several original lines are elided in this listing (loop
 * exits, 'continue' paths, the float_pts assignment target on line 1364).
 */
1312 * Get and encode new output from any of the filtergraphs, without causing
1315 * @return 0 for success, <0 for severe errors
1317 static int reap_filters(int flush)
1319 AVFrame *filtered_frame = NULL;
1322 /* Reap all buffers present in the buffer sinks */
1323 for (i = 0; i < nb_output_streams; i++) {
1324 OutputStream *ost = output_streams[i];
1325 OutputFile *of = output_files[ost->file_index];
1326 AVFilterContext *filter;
1327 AVCodecContext *enc = ost->enc_ctx;
1332 filter = ost->filter->filter;
/* lazily allocate the reusable frame used to receive filter output */
1334 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1335 return AVERROR(ENOMEM);
1337 filtered_frame = ost->filtered_frame;
/* NOTE(review): float_pts is a double initialized from the int64
 * sentinel AV_NOPTS_VALUE — intentional in upstream, but lossy. */
1340 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1341 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1342 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN/EOF are expected; anything else is only warned about */
1344 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1345 av_log(NULL, AV_LOG_WARNING,
1346 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1347 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during flush, push a NULL frame so video encoding finalizes */
1348 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1349 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1353 if (ost->finished) {
1354 av_frame_unref(filtered_frame);
1357 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1358 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1359 AVRational tb = enc->time_base;
/* widen the time base so float_pts keeps extra fractional precision */
1360 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1362 tb.den <<= extra_bits;
1364 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1365 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1366 float_pts /= 1 << extra_bits;
1367 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1368 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts in encoder time base, offset by the output start time */
1370 filtered_frame->pts =
1371 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1372 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1374 //if (ost->source_index >= 0)
1375 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame to the matching encoder path */
1377 switch (filter->inputs[0]->type) {
1378 case AVMEDIA_TYPE_VIDEO:
1379 if (!ost->frame_aspect_ratio.num)
1380 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1383 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1384 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1386 enc->time_base.num, enc->time_base.den);
1389 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1391 case AVMEDIA_TYPE_AUDIO:
/* reject channel-count changes the encoder cannot absorb */
1392 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1393 enc->channels != av_frame_get_channels(filtered_frame)) {
1394 av_log(NULL, AV_LOG_ERROR,
1395 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1398 do_audio_out(of->ctx, ost, filtered_frame);
1401 // TODO support subtitle filters
1405 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and (at verbose level) per-stream packet/frame statistics for every
 * input and output file. 'total_size' is the muxed output size in bytes.
 * NOTE(review): some original lines are elided in this listing (loop
 * index declarations, some closing braces and conditions).
 */
1412 static void print_final_stats(int64_t total_size)
1414 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1415 uint64_t subtitle_size = 0;
1416 uint64_t data_size = 0;
1417 float percent = -1.0;
/* accumulate encoded byte counts per media type */
1421 for (i = 0; i < nb_output_streams; i++) {
1422 OutputStream *ost = output_streams[i];
1423 switch (ost->enc_ctx->codec_type) {
1424 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1425 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1426 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1427 default: other_size += ost->data_size; break;
1429 extra_size += ost->enc_ctx->extradata_size;
1430 data_size += ost->data_size;
/* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with the legacy
 * CODEC_FLAG_PASS2 macro — presumably both name the same bits, but
 * AV_CODEC_FLAG_PASS2 would be the consistent spelling; verify. */
1431 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1432 != AV_CODEC_FLAG_PASS1)
/* container overhead relative to raw encoded payload */
1436 if (data_size && total_size>0 && total_size >= data_size)
1437 percent = 100.0 * (total_size - data_size) / data_size;
1439 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1440 video_size / 1024.0,
1441 audio_size / 1024.0,
1442 subtitle_size / 1024.0,
1443 other_size / 1024.0,
1444 extra_size / 1024.0);
1446 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1448 av_log(NULL, AV_LOG_INFO, "unknown");
1449 av_log(NULL, AV_LOG_INFO, "\n");
1451 /* print verbose per-stream stats */
1452 for (i = 0; i < nb_input_files; i++) {
1453 InputFile *f = input_files[i];
/* shadows the parameter on purpose: per-file totals only */
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1457 i, f->ctx->filename);
1459 for (j = 0; j < f->nb_streams; j++) {
1460 InputStream *ist = input_streams[f->ist_index + j];
1461 enum AVMediaType type = ist->dec_ctx->codec_type;
1463 total_size += ist->data_size;
1464 total_packets += ist->nb_packets;
1466 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1469 ist->nb_packets, ist->data_size);
1471 if (ist->decoding_needed) {
1472 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1473 ist->frames_decoded);
1474 if (type == AVMEDIA_TYPE_AUDIO)
1475 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1476 av_log(NULL, AV_LOG_VERBOSE, "; ");
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1483 total_packets, total_size);
/* same breakdown for each output file */
1486 for (i = 0; i < nb_output_files; i++) {
1487 OutputFile *of = output_files[i];
1488 uint64_t total_packets = 0, total_size = 0;
1490 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1491 i, of->ctx->filename);
1493 for (j = 0; j < of->ctx->nb_streams; j++) {
1494 OutputStream *ost = output_streams[of->ost_index + j];
1495 enum AVMediaType type = ost->enc_ctx->codec_type;
1497 total_size += ost->data_size;
1498 total_packets += ost->packets_written;
1500 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1501 i, j, media_type_string(type));
1502 if (ost->encoding_needed) {
1503 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1504 ost->frames_encoded);
1505 if (type == AVMEDIA_TYPE_AUDIO)
1506 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1507 av_log(NULL, AV_LOG_VERBOSE, "; ");
1510 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1511 ost->packets_written, ost->data_size);
1513 av_log(NULL, AV_LOG_VERBOSE, "\n");
1516 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1517 total_packets, total_size);
/* warn loudly when absolutely nothing was encoded */
1519 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1520 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1522 av_log(NULL, AV_LOG_WARNING, "\n");
1524 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic progress line ("frame= ... fps= ... bitrate= ...") to
 * stderr/log and, when -progress is active, a machine-readable key=value
 * report to progress_avio. Throttled to one report per 500ms except for
 * the final report. On the final report it also calls print_final_stats().
 * NOTE(review): many original lines are elided in this listing (early
 * returns, the declarations of buf/ost/q/qp/j/p/total_size/bitrate, the
 * hours/mins computation, and several closing braces).
 */
1529 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1532 AVBPrint buf_script;
1534 AVFormatContext *oc;
1536 AVCodecContext *enc;
1537 int frame_number, vid, i;
1539 int64_t pts = INT64_MIN;
/* static state: remembers when the previous report was printed */
1540 static int64_t last_time = -1;
1541 static int qp_histogram[52];
1542 int hours, mins, secs, us;
/* nothing to do if no reporting channel is enabled */
1544 if (!print_stats && !is_last_report && !progress_avio)
/* rate-limit intermediate reports to one per 500ms */
1547 if (!is_last_report) {
1548 if (last_time == -1) {
1549 last_time = cur_time;
1552 if ((cur_time - last_time) < 500000)
1554 last_time = cur_time;
1558 oc = output_files[0]->ctx;
1560 total_size = avio_size(oc->pb);
1561 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1562 total_size = avio_tell(oc->pb);
1566 av_bprint_init(&buf_script, 0, 1);
1567 for (i = 0; i < nb_output_streams; i++) {
1569 ost = output_streams[i];
/* quantizer only meaningful for encoded (non-copy) streams */
1571 if (!ost->stream_copy)
1572 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' flags that a video stream was already reported */
1574 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1576 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1577 ost->file_index, ost->index, q);
1579 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1580 float fps, t = (cur_time-timer_start) / 1000000.0;
1582 frame_number = ost->frame_number;
/* avoid a misleading fps during the first second of wall time */
1583 fps = t > 1 ? frame_number / t : 0;
1584 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1585 frame_number, fps < 9.95, fps, q);
1586 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1587 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1588 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1589 ost->file_index, ost->index, q);
1591 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram (enabled with -qphist): one hex digit per bucket */
1595 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1597 for (j = 0; j < 32; j++)
1598 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
/* per-plane and aggregate PSNR when the encoder collects error stats */
1601 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1603 double error, error_sum = 0;
1604 double scale, scale_sum = 0;
1606 char type[3] = { 'Y','U','V' };
1607 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1608 for (j = 0; j < 3; j++) {
1609 if (is_last_report) {
1610 error = enc->error[j];
1611 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1613 error = ost->error[j];
1614 scale = enc->width * enc->height * 255.0 * 255.0;
1620 p = psnr(error / scale);
1621 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1622 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1623 ost->file_index, ost->index, type[j] | 32, p);
1625 p = psnr(error_sum / scale_sum);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1627 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1628 ost->file_index, ost->index, p);
1632 /* compute min output value */
1633 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1634 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1635 ost->st->time_base, AV_TIME_BASE_Q));
1637 nb_frames_drop += ost->last_droped;
/* split the largest end pts into seconds/microseconds */
1640 secs = FFABS(pts) / AV_TIME_BASE;
1641 us = FFABS(pts) % AV_TIME_BASE;
1647 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1649 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1651 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1652 "size=%8.0fkB time=", total_size / 1024.0);
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1656 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1657 (100 * us) / AV_TIME_BASE);
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1661 av_bprintf(&buf_script, "bitrate=N/A\n");
1663 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1664 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
/* machine-readable mirror of the same values for -progress */
1667 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1668 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1669 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1670 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1671 hours, mins, secs, us);
1673 if (nb_frames_dup || nb_frames_drop)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1675 nb_frames_dup, nb_frames_drop);
1676 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1677 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1679 if (print_stats || is_last_report) {
/* '\r' keeps intermediate reports on one console line */
1680 const char end = is_last_report ? '\n' : '\r';
1681 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1682 fprintf(stderr, "%s %c", buf, end);
1684 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1689 if (progress_avio) {
1690 av_bprintf(&buf_script, "progress=%s\n",
1691 is_last_report ? "end" : "continue");
1692 avio_write(progress_avio, buf_script.str,
1693 FFMIN(buf_script.len, buf_script.size - 1));
1694 avio_flush(progress_avio);
1695 av_bprint_finalize(&buf_script, NULL);
1696 if (is_last_report) {
1697 avio_closep(&progress_avio);
1702 print_final_stats(total_size);
/*
 * Drain every audio/video encoder at end of stream by repeatedly calling
 * the encode function with a NULL frame until no more packets come out,
 * muxing each flushed packet.
 * NOTE(review): the flush loop header, the stop_encoding handling, the
 * 'pkt'/'ret'/'got_packet'/'desc' declarations and several braces are
 * elided in this listing.
 */
1705 static void flush_encoders(void)
1709 for (i = 0; i < nb_output_streams; i++) {
1710 OutputStream *ost = output_streams[i];
1711 AVCodecContext *enc = ost->enc_ctx;
1712 AVFormatContext *os = output_files[ost->file_index]->ctx;
1713 int stop_encoding = 0;
/* skip streams that were stream-copied, have no delay, or are raw */
1715 if (!ost->encoding_needed)
1718 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1720 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the type-specific legacy encode entry point */
1724 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1727 switch (enc->codec_type) {
1728 case AVMEDIA_TYPE_AUDIO:
1729 encode = avcodec_encode_audio2;
1732 case AVMEDIA_TYPE_VIDEO:
1733 encode = avcodec_encode_video2;
1744 av_init_packet(&pkt);
1748 update_benchmark(NULL);
/* NULL frame => ask the encoder to emit any buffered packets */
1749 ret = encode(enc, &pkt, NULL, &got_packet);
1750 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1752 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats still need to be written while flushing */
1757 if (ost->logfile && enc->stats_out) {
1758 fprintf(ost->logfile, "%s", enc->stats_out);
1764 if (ost->finished & MUXER_FINISHED) {
1765 av_free_packet(&pkt);
/* convert from encoder to stream time base before muxing */
1768 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1769 pkt_size = pkt.size;
1770 write_frame(os, &pkt, ost);
1771 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1772 do_video_stats(ost, pkt_size);
/*
 * Decide whether a demuxed packet from input stream 'ist' should be
 * stream-copied into output stream 'ost' right now: the output must be
 * mapped to this exact input, and the packet must not predate the output
 * file's start time.
 * NOTE(review): the return statements are elided in this listing;
 * presumably the visible conditions reject with 0 and fall through to
 * return 1 — verify against the full source.
 */
1783 * Check whether a packet from ist should be written into ost at this time
1785 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1787 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files */
1788 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1790 if (ost->source_index != ist_index)
1796 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding:
 * rescales timestamps into the output stream time base, honors -ss /
 * -t style start/recording-time limits, optionally runs the bitstream
 * through av_parser_change, and handles the AVFMT_RAWPICTURE special
 * case before muxing via write_frame().
 * NOTE(review): the declarations of 'opkt'/'pict', several early
 * returns and closing braces are elided in this listing.
 */
1802 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1804 OutputFile *of = output_files[ost->file_index];
1805 InputFile *f = input_files [ist->file_index];
1806 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
/* the output start offset expressed in both stream time bases */
1807 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1812 av_init_packet(&opkt);
/* drop leading non-keyframes unless explicitly requested to keep them */
1814 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1815 !ost->copy_initial_nonkeyframes)
/* drop packets before the requested start, unless -copypriorss */
1818 if (pkt->pts == AV_NOPTS_VALUE) {
1819 if (!ost->frame_number && ist->pts < start_time &&
1820 !ost->copy_prior_start)
1823 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824 !ost->copy_prior_start)
/* stop copying once the output's recording time is exhausted */
1828 if (of->recording_time != INT64_MAX &&
1829 ist->pts >= of->recording_time + start_time) {
1830 close_output_stream(ost);
/* same limit, but relative to the input file's own -t window */
1834 if (f->recording_time != INT64_MAX) {
1835 start_time = f->ctx->start_time;
1836 if (f->start_time != AV_NOPTS_VALUE)
1837 start_time += f->start_time;
1838 if (ist->pts >= f->recording_time + start_time) {
1839 close_output_stream(ost);
1844 /* force the input stream PTS */
1845 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts into the output stream time base, minus start offset */
1848 if (pkt->pts != AV_NOPTS_VALUE)
1849 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1851 opkt.pts = AV_NOPTS_VALUE;
1853 if (pkt->dts == AV_NOPTS_VALUE)
1854 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1856 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration */
1859 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1860 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1862 duration = ist->dec_ctx->frame_size;
1863 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865 ost->st->time_base) - ost_tb_start_time;
1868 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869 opkt.flags = pkt->flags;
1870 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1871 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1872 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1873 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1874 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1876 int ret = av_parser_change(ost->parser, ost->st->codec,
1877 &opkt.data, &opkt.size,
1878 pkt->data, pkt->size,
1879 pkt->flags & AV_PKT_FLAG_KEY);
1881 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* take ownership of the (possibly new) buffer from the parser */
1886 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1891 opkt.data = pkt->data;
1892 opkt.size = pkt->size;
1894 av_copy_packet_side_data(&opkt, pkt);
1896 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1897 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1898 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1899 /* store AVPicture in AVPacket, as expected by the output format */
1900 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1902 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1906 opkt.data = (uint8_t *)&pict;
1907 opkt.size = sizeof(AVPicture);
1908 opkt.flags |= AV_PKT_FLAG_KEY;
1911 write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder did not report a channel layout, fill in the default
 * layout for its channel count (bounded by -guess_layout_max) and warn
 * the user about the guess.
 * NOTE(review): the return statements are elided in this listing;
 * presumably returns 0 when no layout could be determined and non-zero
 * otherwise — verify against the full source.
 */
1914 int guess_input_channel_layout(InputStream *ist)
1916 AVCodecContext *dec = ist->dec_ctx;
1918 if (!dec->channel_layout) {
1919 char layout_name[256];
/* refuse to guess above the user-configured channel-count cap */
1921 if (dec->channels > ist->guess_layout_max)
1923 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1924 if (!dec->channel_layout)
1926 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927 dec->channels, dec->channel_layout);
1928 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1929 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet: run avcodec_decode_audio4, keep the input
 * stream's timestamp bookkeeping up to date, reconfigure filtergraphs
 * when the sample format/rate/channel layout changes mid-stream, pick a
 * pts for the decoded frame, and push it into every attached filter.
 * Returns <0 on error, otherwise the decoder's return value.
 * NOTE(review): several original lines are elided in this listing
 * (early returns, divisors of the next_pts/next_dts increments,
 * some closing braces).
 */
1934 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1936 AVFrame *decoded_frame, *f;
1937 AVCodecContext *avctx = ist->dec_ctx;
1938 int i, ret, err = 0, resample_changed;
1939 AVRational decoded_frame_tb;
/* lazily allocate the two reusable frames kept on the InputStream */
1941 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1942 return AVERROR(ENOMEM);
1943 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1944 return AVERROR(ENOMEM);
1945 decoded_frame = ist->decoded_frame;
1947 update_benchmark(NULL);
1948 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1949 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a zero/negative sample rate would poison all later rescaling */
1951 if (ret >= 0 && avctx->sample_rate <= 0) {
1952 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1953 ret = AVERROR_INVALIDDATA;
/* decode_error_stat[0] counts successes, [1] counts failures */
1956 if (*got_output || ret<0)
1957 decode_error_stat[ret<0] ++;
1959 if (ret < 0 && exit_on_error)
1962 if (!*got_output || ret < 0)
1965 ist->samples_decoded += decoded_frame->nb_samples;
1966 ist->frames_decoded++;
1969 /* increment next_dts to use for the case where the input stream does not
1970 have timestamps or there are multiple frames in the packet */
1971 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1973 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any mid-stream change of audio parameters */
1977 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1978 ist->resample_channels != avctx->channels ||
1979 ist->resample_channel_layout != decoded_frame->channel_layout ||
1980 ist->resample_sample_rate != decoded_frame->sample_rate;
1981 if (resample_changed) {
1982 char layout1[64], layout2[64];
1984 if (!guess_input_channel_layout(ist)) {
1985 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986 "layout for Input Stream #%d.%d\n", ist->file_index,
1990 decoded_frame->channel_layout = avctx->channel_layout;
1992 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993 ist->resample_channel_layout);
1994 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995 decoded_frame->channel_layout);
1997 av_log(NULL, AV_LOG_INFO,
1998 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999 ist->file_index, ist->st->index,
2000 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2001 ist->resample_channels, layout1,
2002 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003 avctx->channels, layout2);
/* remember the new parameters as the current baseline */
2005 ist->resample_sample_fmt = decoded_frame->format;
2006 ist->resample_sample_rate = decoded_frame->sample_rate;
2007 ist->resample_channel_layout = decoded_frame->channel_layout;
2008 ist->resample_channels = avctx->channels;
/* rebuild every filtergraph this stream feeds into */
2010 for (i = 0; i < nb_filtergraphs; i++)
2011 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012 FilterGraph *fg = filtergraphs[i];
2013 if (configure_filtergraph(fg) < 0) {
2014 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2020 /* if the decoder provides a pts, use it instead of the last packet pts.
2021 the decoder could be delaying output by a packet or more. */
2022 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024 decoded_frame_tb = avctx->time_base;
2025 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = decoded_frame->pkt_pts;
2027 decoded_frame_tb = ist->st->time_base;
2028 } else if (pkt->pts != AV_NOPTS_VALUE) {
2029 decoded_frame->pts = pkt->pts;
2030 decoded_frame_tb = ist->st->time_base;
2032 decoded_frame->pts = ist->dts;
2033 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so it is not reused for a later frame */
2035 pkt->pts = AV_NOPTS_VALUE;
2036 if (decoded_frame->pts != AV_NOPTS_VALUE)
2037 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039 (AVRational){1, avctx->sample_rate});
/* feed the frame to every filter; ref for all but the last consumer */
2040 for (i = 0; i < ist->nb_filters; i++) {
2041 if (i < ist->nb_filters - 1) {
2042 f = ist->filter_frame;
2043 err = av_frame_ref(f, decoded_frame);
2048 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049 AV_BUFFERSRC_FLAG_PUSH);
2050 if (err == AVERROR_EOF)
2051 err = 0; /* ignore */
2055 decoded_frame->pts = AV_NOPTS_VALUE;
2057 av_frame_unref(ist->filter_frame);
2058 av_frame_unref(decoded_frame);
2059 return err < 0 ? err : ret;
/*
 * Decode one video packet: run avcodec_decode_video2, fix up
 * has_b_frames mismatches between parser and decoder, retrieve hwaccel
 * frames, derive timestamps from the best-effort timestamp, reconfigure
 * filtergraphs on resolution/pixel-format changes, and push the frame
 * into every attached filter. Returns <0 on error, otherwise the
 * decoder's return value.
 * NOTE(review): several original lines are elided in this listing
 * (early returns, some closing braces, debug-gate conditions).
 */
2062 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2064 AVFrame *decoded_frame, *f;
2065 int i, ret = 0, err = 0, resample_changed;
2066 int64_t best_effort_timestamp;
2067 AVRational *frame_sample_aspect;
2069 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072 return AVERROR(ENOMEM);
2073 decoded_frame = ist->decoded_frame;
/* hand the decoder our current dts estimate in the stream time base */
2074 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2076 update_benchmark(NULL);
2077 ret = avcodec_decode_video2(ist->dec_ctx,
2078 decoded_frame, got_output, pkt);
2079 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2081 // The following line may be required in some cases where there is no parser
2082 // or the parser does not has_b_frames correctly
2083 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2087 av_log(ist->dec_ctx, AV_LOG_WARNING,
2088 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089 "If you want to help, upload a sample "
2090 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092 ist->dec_ctx->has_b_frames,
2093 ist->st->codec->has_b_frames);
2096 if (*got_output || ret<0)
2097 decode_error_stat[ret<0] ++;
2099 if (ret < 0 && exit_on_error)
/* diagnose decoders that return frames not matching their context */
2102 if (*got_output && ret >= 0) {
2103 if (ist->dec_ctx->width != decoded_frame->width ||
2104 ist->dec_ctx->height != decoded_frame->height ||
2105 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2106 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2107 decoded_frame->width,
2108 decoded_frame->height,
2109 decoded_frame->format,
2110 ist->dec_ctx->width,
2111 ist->dec_ctx->height,
2112 ist->dec_ctx->pix_fmt);
2116 if (!*got_output || ret < 0)
/* honor the user's -top override for field order */
2119 if(ist->top_field_first>=0)
2120 decoded_frame->top_field_first = ist->top_field_first;
2122 ist->frames_decoded++;
/* copy hwaccel surface data back to system memory when configured */
2124 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2125 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2129 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2131 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2132 if(best_effort_timestamp != AV_NOPTS_VALUE)
2133 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2136 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138 ist->st->index, av_ts2str(decoded_frame->pts),
2139 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140 best_effort_timestamp,
2141 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142 decoded_frame->key_frame, decoded_frame->pict_type,
2143 ist->st->time_base.num, ist->st->time_base.den);
2148 if (ist->st->sample_aspect_ratio.num)
2149 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream changes of frame geometry or pixel format */
2151 resample_changed = ist->resample_width != decoded_frame->width ||
2152 ist->resample_height != decoded_frame->height ||
2153 ist->resample_pix_fmt != decoded_frame->format;
2154 if (resample_changed) {
2155 av_log(NULL, AV_LOG_INFO,
2156 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157 ist->file_index, ist->st->index,
2158 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2159 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2161 ist->resample_width = decoded_frame->width;
2162 ist->resample_height = decoded_frame->height;
2163 ist->resample_pix_fmt = decoded_frame->format;
2165 for (i = 0; i < nb_filtergraphs; i++) {
2166 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167 configure_filtergraph(filtergraphs[i]) < 0) {
2168 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2174 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* push to every filter; ref the frame for all but the last consumer */
2175 for (i = 0; i < ist->nb_filters; i++) {
2176 if (!frame_sample_aspect->num)
2177 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2179 if (i < ist->nb_filters - 1) {
2180 f = ist->filter_frame;
2181 err = av_frame_ref(f, decoded_frame);
2186 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187 if (ret == AVERROR_EOF) {
2188 ret = 0; /* ignore */
2189 } else if (ret < 0) {
2190 av_log(NULL, AV_LOG_FATAL,
2191 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2197 av_frame_unref(ist->filter_frame);
2198 av_frame_unref(decoded_frame);
2199 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix overlapping display
 * durations (-fix_sub_duration) by shortening the previous subtitle,
 * update the sub2video rendering, and encode the subtitle into every
 * output stream mapped to this input.
 * NOTE(review): some original lines are elided in this listing (the
 * 'end' declaration, early returns, some closing braces).
 */
2202 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2204 AVSubtitle subtitle;
2205 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206 &subtitle, got_output, pkt);
2208 if (*got_output || ret<0)
2209 decode_error_stat[ret<0] ++;
2211 if (ret < 0 && exit_on_error)
/* nothing decoded: flush the sub2video overlay and bail out */
2214 if (ret < 0 || !*got_output) {
2216 sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle so it does not
 * overlap this one; works one subtitle behind via FFSWAP */
2220 if (ist->fix_sub_duration) {
2222 if (ist->prev_sub.got_output) {
2223 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2224 1000, AV_TIME_BASE);
2225 if (end < ist->prev_sub.subtitle.end_display_time) {
2226 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2227 "Subtitle duration reduced from %d to %d%s\n",
2228 ist->prev_sub.subtitle.end_display_time, end,
2229 end <= 0 ? ", dropping it" : "");
2230 ist->prev_sub.subtitle.end_display_time = end;
2233 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2234 FFSWAP(int, ret, ist->prev_sub.ret);
2235 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2243 sub2video_update(ist, &subtitle);
2245 if (!subtitle.num_rects)
2248 ist->frames_decoded++;
/* encode the decoded subtitle on every matching output stream */
2250 for (i = 0; i < nb_output_streams; i++) {
2251 OutputStream *ost = output_streams[i];
2253 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2254 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2257 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2261 avsubtitle_free(&subtitle);
/*
 * Signal end-of-stream to every filtergraph input fed by this input
 * stream by pushing a NULL frame into each buffersrc.
 * NOTE(review): the declarations of 'i'/'ret', the error check and the
 * return statements are elided in this listing.
 */
2265 static int send_filter_eof(InputStream *ist)
2268 for (i = 0; i < ist->nb_filters; i++) {
2269 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/*
 * Central per-packet dispatcher: maintains the input stream's dts/pts
 * estimates, loops the packet through the type-specific decoder
 * (decode_audio / decode_video / transcode_subtitles) while data or
 * flush output remains, sends filter EOF after the final flush, advances
 * timestamps for pure stream-copy, and stream-copies the packet to every
 * eligible output. pkt == NULL means EOF (flush the decoders).
 * NOTE(review): many original lines are elided in this listing (the
 * declarations of avpkt/ret/i/got_output/duration, the avpkt setup from
 * pkt, break/return statements and several closing braces).
 */
2276 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2277 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
/* first packet: seed dts with the decoder's B-frame delay estimate */
2283 if (!ist->saw_first_ts) {
2284 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2286 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2287 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2288 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2290 ist->saw_first_ts = 1;
2293 if (ist->next_dts == AV_NOPTS_VALUE)
2294 ist->next_dts = ist->dts;
2295 if (ist->next_pts == AV_NOPTS_VALUE)
2296 ist->next_pts = ist->pts;
2300 av_init_packet(&avpkt);
/* sync our dts estimate to the packet's actual dts when present */
2308 if (pkt->dts != AV_NOPTS_VALUE) {
2309 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2310 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2311 ist->next_pts = ist->pts = ist->dts;
2314 // while we have more to decode or while the decoder did output something on EOF
2315 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2319 ist->pts = ist->next_pts;
2320 ist->dts = ist->next_dts;
/* warn once about packets a decoder consumes in several pieces */
2322 if (avpkt.size && avpkt.size != pkt->size &&
2323 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2324 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2325 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2326 ist->showed_multi_packet_warning = 1;
2329 switch (ist->dec_ctx->codec_type) {
2330 case AVMEDIA_TYPE_AUDIO:
2331 ret = decode_audio (ist, &avpkt, &got_output);
2333 case AVMEDIA_TYPE_VIDEO:
2334 ret = decode_video (ist, &avpkt, &got_output);
/* estimate frame duration: packet duration, else codec frame rate */
2335 if (avpkt.duration) {
2336 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2337 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2338 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2339 duration = ((int64_t)AV_TIME_BASE *
2340 ist->dec_ctx->framerate.den * ticks) /
2341 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2345 if(ist->dts != AV_NOPTS_VALUE && duration) {
2346 ist->next_dts += duration;
2348 ist->next_dts = AV_NOPTS_VALUE;
2351 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2353 case AVMEDIA_TYPE_SUBTITLE:
2354 ret = transcode_subtitles(ist, &avpkt, &got_output);
2361 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2362 ist->file_index, ist->st->index, av_err2str(ret));
/* a pts must only be consumed once per packet */
2369 avpkt.pts= AV_NOPTS_VALUE;
2371 // touch data and size only if not EOF
2373 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2381 if (got_output && !pkt)
2385 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2386 if (!pkt && ist->decoding_needed && !got_output) {
2387 int ret = send_filter_eof(ist);
2389 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2394 /* handle stream copy */
2395 if (!ist->decoding_needed) {
2396 ist->dts = ist->next_dts;
2397 switch (ist->dec_ctx->codec_type) {
2398 case AVMEDIA_TYPE_AUDIO:
2399 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2400 ist->dec_ctx->sample_rate;
2402 case AVMEDIA_TYPE_VIDEO:
2403 if (ist->framerate.num) {
2404 // TODO: Remove work-around for c99-to-c89 issue 7
2405 AVRational time_base_q = AV_TIME_BASE_Q;
2406 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2407 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2408 } else if (pkt->duration) {
2409 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2410 } else if(ist->dec_ctx->framerate.num != 0) {
2411 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2412 ist->next_dts += ((int64_t)AV_TIME_BASE *
2413 ist->dec_ctx->framerate.den * ticks) /
2414 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2418 ist->pts = ist->dts;
2419 ist->next_pts = ist->next_dts;
/* stream-copy the original packet to every eligible output */
2421 for (i = 0; pkt && i < nb_output_streams; i++) {
2422 OutputStream *ost = output_streams[i];
2424 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2427 do_streamcopy(ist, ost, pkt);
2433 static void print_sdp(void)
2438 AVIOContext *sdp_pb;
2439 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2443 for (i = 0, j = 0; i < nb_output_files; i++) {
2444 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2445 avc[j] = output_files[i]->ctx;
2450 av_sdp_create(avc, j, sdp, sizeof(sdp));
2452 if (!sdp_filename) {
2453 printf("SDP:\n%s\n", sdp);
2456 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2457 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2459 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2460 avio_closep(&sdp_pb);
2461 av_freep(&sdp_filename);
2468 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2471 for (i = 0; hwaccels[i].name; i++)
2472 if (hwaccels[i].pix_fmt == pix_fmt)
2473 return &hwaccels[i];
2477 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2479 InputStream *ist = s->opaque;
2480 const enum AVPixelFormat *p;
2483 for (p = pix_fmts; *p != -1; p++) {
2484 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2485 const HWAccel *hwaccel;
2487 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2490 hwaccel = get_hwaccel(*p);
2492 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2493 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2496 ret = hwaccel->init(s);
2498 if (ist->hwaccel_id == hwaccel->id) {
2499 av_log(NULL, AV_LOG_FATAL,
2500 "%s hwaccel requested for input stream #%d:%d, "
2501 "but cannot be initialized.\n", hwaccel->name,
2502 ist->file_index, ist->st->index);
2503 return AV_PIX_FMT_NONE;
2507 ist->active_hwaccel_id = hwaccel->id;
2508 ist->hwaccel_pix_fmt = *p;
2515 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2517 InputStream *ist = s->opaque;
2519 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2520 return ist->hwaccel_get_buffer(s, frame, flags);
2522 return avcodec_default_get_buffer2(s, frame, flags);
2525 static int init_input_stream(int ist_index, char *error, int error_len)
2528 InputStream *ist = input_streams[ist_index];
2530 if (ist->decoding_needed) {
2531 AVCodec *codec = ist->dec;
2533 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2534 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2535 return AVERROR(EINVAL);
2538 ist->dec_ctx->opaque = ist;
2539 ist->dec_ctx->get_format = get_format;
2540 ist->dec_ctx->get_buffer2 = get_buffer;
2541 ist->dec_ctx->thread_safe_callbacks = 1;
2543 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2544 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2545 (ist->decoding_needed & DECODING_FOR_OST)) {
2546 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2547 if (ist->decoding_needed & DECODING_FOR_FILTER)
2548 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2551 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2552 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2553 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2554 if (ret == AVERROR_EXPERIMENTAL)
2555 abort_codec_experimental(codec, 0);
2557 snprintf(error, error_len,
2558 "Error while opening decoder for input stream "
2560 ist->file_index, ist->st->index, av_err2str(ret));
2563 assert_avoptions(ist->decoder_opts);
2566 ist->next_pts = AV_NOPTS_VALUE;
2567 ist->next_dts = AV_NOPTS_VALUE;
2572 static InputStream *get_input_stream(OutputStream *ost)
2574 if (ost->source_index >= 0)
2575 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values in ascending order.
 * Uses explicit comparisons rather than subtraction to avoid signed
 * overflow and to fit qsort's int return type.
 */
static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
    return va < vb ? -1 : va > vb ? +1 : 0;
}
2585 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2589 if (ost->encoding_needed) {
2590 AVCodec *codec = ost->enc;
2591 AVCodecContext *dec = NULL;
2594 if ((ist = get_input_stream(ost)))
2596 if (dec && dec->subtitle_header) {
2597 /* ASS code assumes this buffer is null terminated so add extra byte. */
2598 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2599 if (!ost->enc_ctx->subtitle_header)
2600 return AVERROR(ENOMEM);
2601 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2602 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2604 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2605 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2606 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2608 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2609 if (ret == AVERROR_EXPERIMENTAL)
2610 abort_codec_experimental(codec, 1);
2611 snprintf(error, error_len,
2612 "Error while opening encoder for output stream #%d:%d - "
2613 "maybe incorrect parameters such as bit_rate, rate, width or height",
2614 ost->file_index, ost->index);
2617 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2618 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2619 av_buffersink_set_frame_size(ost->filter->filter,
2620 ost->enc_ctx->frame_size);
2621 assert_avoptions(ost->encoder_opts);
2622 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2623 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2624 " It takes bits/s as argument, not kbits/s\n");
2626 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2628 av_log(NULL, AV_LOG_FATAL,
2629 "Error initializing the output stream codec context.\n");
2633 // copy timebase while removing common factors
2634 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2635 ost->st->codec->codec= ost->enc_ctx->codec;
2637 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2639 av_log(NULL, AV_LOG_FATAL,
2640 "Error setting up codec context options.\n");
2643 // copy timebase while removing common factors
2644 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2650 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2651 AVCodecContext *avctx)
2654 int n = 1, i, size, index = 0;
2657 for (p = kf; *p; p++)
2661 pts = av_malloc_array(size, sizeof(*pts));
2663 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2668 for (i = 0; i < n; i++) {
2669 char *next = strchr(p, ',');
2674 if (!memcmp(p, "chapters", 8)) {
2676 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2679 if (avf->nb_chapters > INT_MAX - size ||
2680 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2682 av_log(NULL, AV_LOG_FATAL,
2683 "Could not allocate forced key frames array.\n");
2686 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2687 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2689 for (j = 0; j < avf->nb_chapters; j++) {
2690 AVChapter *c = avf->chapters[j];
2691 av_assert1(index < size);
2692 pts[index++] = av_rescale_q(c->start, c->time_base,
2693 avctx->time_base) + t;
2698 t = parse_time_or_die("force_key_frames", p, 1);
2699 av_assert1(index < size);
2700 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2707 av_assert0(index == size);
2708 qsort(pts, size, sizeof(*pts), compare_int64);
2709 ost->forced_kf_count = size;
2710 ost->forced_kf_pts = pts;
2713 static void report_new_stream(int input_index, AVPacket *pkt)
2715 InputFile *file = input_files[input_index];
2716 AVStream *st = file->ctx->streams[pkt->stream_index];
2718 if (pkt->stream_index < file->nb_streams_warn)
2720 av_log(file->ctx, AV_LOG_WARNING,
2721 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2722 av_get_media_type_string(st->codec->codec_type),
2723 input_index, pkt->stream_index,
2724 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2725 file->nb_streams_warn = pkt->stream_index + 1;
2728 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2730 AVDictionaryEntry *e;
2732 uint8_t *encoder_string;
2733 int encoder_string_len;
2734 int format_flags = 0;
2735 int codec_flags = 0;
2737 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2740 e = av_dict_get(of->opts, "fflags", NULL, 0);
2742 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2745 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2747 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2749 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2752 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2755 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2756 encoder_string = av_mallocz(encoder_string_len);
2757 if (!encoder_string)
2760 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2761 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2763 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2764 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2765 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2766 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * transcode_init(): one-time setup before the main transcoding loop.
 * Binds filtergraph outputs to source streams, derives encoder/stream-copy
 * parameters for every output stream, opens all encoders and decoders,
 * discards unused programs, writes output file headers and dumps the
 * stream mapping.
 *
 * NOTE(review): this extract is garbled — embedded line numbers, lost
 * indentation and elided lines (braces, continuations, error paths).
 * The code below is kept byte-identical; restore it from the pristine
 * tree before building.
 */
static int transcode_init(void)
2771 int ret = 0, i, j, k;
2772 AVFormatContext *oc;
2775 char error[1024] = {0};
/* bind complex-filtergraph outputs to a source stream index so later
 * code can treat them uniformly (walks inputs of single-input graphs) */
2778 for (i = 0; i < nb_filtergraphs; i++) {
2779 FilterGraph *fg = filtergraphs[i];
2780 for (j = 0; j < fg->nb_outputs; j++) {
2781 OutputFilter *ofilter = fg->outputs[j];
2782 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2784 if (fg->nb_inputs != 1)
2786 for (k = nb_input_streams-1; k >= 0 ; k--)
2787 if (fg->inputs[0]->ist == input_streams[k])
2789 ofilter->ost->source_index = k;
2793 /* init framerate emulation */
2794 for (i = 0; i < nb_input_files; i++) {
2795 InputFile *ifile = input_files[i];
2796 if (ifile->rate_emu)
2797 for (j = 0; j < ifile->nb_streams; j++)
2798 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2801 /* for each output stream, we compute the right encoding parameters */
2802 for (i = 0; i < nb_output_streams; i++) {
2803 AVCodecContext *enc_ctx;
2804 AVCodecContext *dec_ctx = NULL;
2805 ost = output_streams[i];
2806 oc = output_files[ost->file_index]->ctx;
2807 ist = get_input_stream(ost);
2809 if (ost->attachment_filename)
/* stream copy writes directly into the muxer's codec context */
2812 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2815 dec_ctx = ist->dec_ctx;
2817 ost->st->disposition = ist->st->disposition;
2818 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2819 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its type in the file, mark it default */
2821 for (j=0; j<oc->nb_streams; j++) {
2822 AVStream *st = oc->streams[j];
2823 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2826 if (j == oc->nb_streams)
2827 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2828 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: mirror decoder parameters into the output ---- */
2831 if (ost->stream_copy) {
2833 uint64_t extra_size;
2835 av_assert0(ist && !ost->filter);
2837 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2839 if (extra_size > INT_MAX) {
2840 return AVERROR(EINVAL);
2843 /* if stream_copy is selected, no need to decode or encode */
2844 enc_ctx->codec_id = dec_ctx->codec_id;
2845 enc_ctx->codec_type = dec_ctx->codec_type;
2847 if (!enc_ctx->codec_tag) {
2848 unsigned int codec_tag;
2849 if (!oc->oformat->codec_tag ||
2850 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2851 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2852 enc_ctx->codec_tag = dec_ctx->codec_tag;
2855 enc_ctx->bit_rate = dec_ctx->bit_rate;
2856 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2857 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2858 enc_ctx->field_order = dec_ctx->field_order;
2859 if (dec_ctx->extradata_size) {
2860 enc_ctx->extradata = av_mallocz(extra_size);
2861 if (!enc_ctx->extradata) {
2862 return AVERROR(ENOMEM);
2864 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2866 enc_ctx->extradata_size= dec_ctx->extradata_size;
2867 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2869 enc_ctx->time_base = ist->st->time_base;
/* container-specific timebase fixups for copied streams (see -copy_tb) */
2871 * Avi is a special case here because it supports variable fps but
2872 * having the fps and timebase differe significantly adds quite some
2875 if(!strcmp(oc->oformat->name, "avi")) {
2876 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2877 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2878 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2879 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2881 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2882 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2883 enc_ctx->ticks_per_frame = 2;
2884 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2885 && av_q2d(ist->st->time_base) < 1.0/500
2887 enc_ctx->time_base = dec_ctx->time_base;
2888 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2889 enc_ctx->time_base.den *= 2;
2890 enc_ctx->ticks_per_frame = 2;
2892 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2893 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2894 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2895 && strcmp(oc->oformat->name, "f4v")
2897 if( copy_tb<0 && dec_ctx->time_base.den
2898 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2899 && av_q2d(ist->st->time_base) < 1.0/500
2901 enc_ctx->time_base = dec_ctx->time_base;
2902 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode (tmcd) tracks keep the decoder timebase when it is sane */
2905 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2906 && dec_ctx->time_base.num < dec_ctx->time_base.den
2907 && dec_ctx->time_base.num > 0
2908 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2909 enc_ctx->time_base = dec_ctx->time_base;
2912 if (ist && !ost->frame_rate.num)
2913 ost->frame_rate = ist->framerate;
2914 if(ost->frame_rate.num)
2915 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2917 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2918 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* duplicate stream-level side data (except an overridden display matrix) */
2920 if (ist->st->nb_side_data) {
2921 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2922 sizeof(*ist->st->side_data));
2923 if (!ost->st->side_data)
2924 return AVERROR(ENOMEM);
2926 ost->st->nb_side_data = 0;
2927 for (j = 0; j < ist->st->nb_side_data; j++) {
2928 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2929 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2931 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2934 sd_dst->data = av_malloc(sd_src->size);
2936 return AVERROR(ENOMEM);
2937 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2938 sd_dst->size = sd_src->size;
2939 sd_dst->type = sd_src->type;
2940 ost->st->nb_side_data++;
2944 ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copy for stream copy */
2946 switch (enc_ctx->codec_type) {
2947 case AVMEDIA_TYPE_AUDIO:
2948 if (audio_volume != 256) {
2949 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2952 enc_ctx->channel_layout = dec_ctx->channel_layout;
2953 enc_ctx->sample_rate = dec_ctx->sample_rate;
2954 enc_ctx->channels = dec_ctx->channels;
2955 enc_ctx->frame_size = dec_ctx->frame_size;
2956 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2957 enc_ctx->block_align = dec_ctx->block_align;
2958 enc_ctx->initial_padding = dec_ctx->delay;
2959 #if FF_API_AUDIOENC_DELAY
2960 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3: clear bogus block_align values coming from some demuxers */
2962 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2963 enc_ctx->block_align= 0;
2964 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2965 enc_ctx->block_align= 0;
2967 case AVMEDIA_TYPE_VIDEO:
2968 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2969 enc_ctx->width = dec_ctx->width;
2970 enc_ctx->height = dec_ctx->height;
2971 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2972 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2974 av_mul_q(ost->frame_aspect_ratio,
2975 (AVRational){ enc_ctx->height, enc_ctx->width });
2976 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2977 "with stream copy may produce invalid files\n");
2979 else if (ist->st->sample_aspect_ratio.num)
2980 sar = ist->st->sample_aspect_ratio;
2982 sar = dec_ctx->sample_aspect_ratio;
2983 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2984 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2985 ost->st->r_frame_rate = ist->st->r_frame_rate;
2987 case AVMEDIA_TYPE_SUBTITLE:
2988 enc_ctx->width = dec_ctx->width;
2989 enc_ctx->height = dec_ctx->height;
2991 case AVMEDIA_TYPE_UNKNOWN:
2992 case AVMEDIA_TYPE_DATA:
2993 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path: pick encoder, build filtergraph, derive params ---- */
3000 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3002 /* should only happen when a default codec is not present. */
3003 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3004 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3005 ret = AVERROR(EINVAL);
3009 set_encoder_id(output_files[ost->file_index], ost);
3012 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3013 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3015 fg = init_simple_filtergraph(ist, ost);
3016 if (configure_filtergraph(fg)) {
3017 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* frame-rate selection: filter -> -r option -> input r_frame_rate -> 25fps */
3022 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3023 if (!ost->frame_rate.num)
3024 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3025 if (ist && !ost->frame_rate.num)
3026 ost->frame_rate = ist->framerate;
3027 if (ist && !ost->frame_rate.num)
3028 ost->frame_rate = ist->st->r_frame_rate;
3029 if (ist && !ost->frame_rate.num) {
3030 ost->frame_rate = (AVRational){25, 1};
3031 av_log(NULL, AV_LOG_WARNING,
3033 "about the input framerate is available. Falling "
3034 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3035 "if you want a different framerate.\n",
3036 ost->file_index, ost->index);
3038 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3039 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3040 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3041 ost->frame_rate = ost->enc->supported_framerates[idx];
3043 // reduce frame rate for mpeg4 to be within the spec limits
3044 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3045 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3046 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* pull the negotiated format parameters out of the filtergraph sink */
3050 switch (enc_ctx->codec_type) {
3051 case AVMEDIA_TYPE_AUDIO:
3052 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3053 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3054 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3055 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3056 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3058 case AVMEDIA_TYPE_VIDEO:
3059 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3060 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3061 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3062 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3063 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3064 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3065 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3067 for (j = 0; j < ost->forced_kf_count; j++)
3068 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3070 enc_ctx->time_base);
3072 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3073 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3074 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3075 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3076 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3077 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3078 if (!strncmp(ost->enc->name, "libx264", 7) &&
3079 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3080 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3081 av_log(NULL, AV_LOG_WARNING,
3082 "No pixel format specified, %s for H.264 encoding chosen.\n"
3083 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3084 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3085 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3086 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3087 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3088 av_log(NULL, AV_LOG_WARNING,
3089 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3090 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3091 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3092 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3094 ost->st->avg_frame_rate = ost->frame_rate;
3097 enc_ctx->width != dec_ctx->width ||
3098 enc_ctx->height != dec_ctx->height ||
3099 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3100 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: compile the expression or parse static timestamps */
3103 if (ost->forced_keyframes) {
3104 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3105 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3106 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3108 av_log(NULL, AV_LOG_ERROR,
3109 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3112 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3113 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3114 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3115 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3117 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3118 // parse it only for static kf timings
3119 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3120 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3124 case AVMEDIA_TYPE_SUBTITLE:
3125 enc_ctx->time_base = (AVRational){1, 1000};
3126 if (!enc_ctx->width) {
3127 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3128 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3131 case AVMEDIA_TYPE_DATA:
/* -disposition: evaluate the user's flag string against this table */
3139 if (ost->disposition) {
3140 static const AVOption opts[] = {
3141 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3142 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3143 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3144 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3145 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3146 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3147 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3148 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3149 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3150 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3151 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3152 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3153 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3154 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3157 static const AVClass class = {
3159 .item_name = av_default_item_name,
3161 .version = LIBAVUTIL_VERSION_INT,
3163 const AVClass *pclass = &class;
3165 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3171 /* open each encoder */
3172 for (i = 0; i < nb_output_streams; i++) {
3173 ret = init_output_stream(output_streams[i], error, sizeof(error));
3178 /* init input streams */
3179 for (i = 0; i < nb_input_streams; i++)
3180 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close every already-opened encoder before bailing out */
3181 for (i = 0; i < nb_output_streams; i++) {
3182 ost = output_streams[i];
3183 avcodec_close(ost->enc_ctx);
3188 /* discard unused programs */
3189 for (i = 0; i < nb_input_files; i++) {
3190 InputFile *ifile = input_files[i];
3191 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3192 AVProgram *p = ifile->ctx->programs[j];
3193 int discard = AVDISCARD_ALL;
3195 for (k = 0; k < p->nb_stream_indexes; k++)
3196 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3197 discard = AVDISCARD_DEFAULT;
3200 p->discard = discard;
3204 /* open files and write file headers */
3205 for (i = 0; i < nb_output_files; i++) {
3206 oc = output_files[i]->ctx;
3207 oc->interrupt_callback = int_cb;
3208 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3209 snprintf(error, sizeof(error),
3210 "Could not write header for output file #%d "
3211 "(incorrect codec parameters ?): %s",
3212 i, av_err2str(ret));
3213 ret = AVERROR(EINVAL);
3216 // assert_avoptions(output_files[i]->opts);
3217 if (strcmp(oc->oformat->name, "rtp")) {
3223 /* dump the file output parameters - cannot be done before in case
3225 for (i = 0; i < nb_output_files; i++) {
3226 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3229 /* dump the stream mapping */
3230 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3231 for (i = 0; i < nb_input_streams; i++) {
3232 ist = input_streams[i];
3234 for (j = 0; j < ist->nb_filters; j++) {
3235 if (ist->filters[j]->graph->graph_desc) {
3236 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3237 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3238 ist->filters[j]->name);
3239 if (nb_filtergraphs > 1)
3240 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3241 av_log(NULL, AV_LOG_INFO, "\n");
3246 for (i = 0; i < nb_output_streams; i++) {
3247 ost = output_streams[i];
3249 if (ost->attachment_filename) {
3250 /* an attached file */
3251 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3252 ost->attachment_filename, ost->file_index, ost->index);
3256 if (ost->filter && ost->filter->graph->graph_desc) {
3257 /* output from a complex graph */
3258 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3259 if (nb_filtergraphs > 1)
3260 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3262 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3263 ost->index, ost->enc ? ost->enc->name : "?");
3267 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3268 input_streams[ost->source_index]->file_index,
3269 input_streams[ost->source_index]->st->index,
3272 if (ost->sync_ist != input_streams[ost->source_index])
3273 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3274 ost->sync_ist->file_index,
3275 ost->sync_ist->st->index);
3276 if (ost->stream_copy)
3277 av_log(NULL, AV_LOG_INFO, " (copy)");
/* report decoder/encoder names; "native" when they match the codec name */
3279 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3280 const AVCodec *out_codec = ost->enc;
3281 const char *decoder_name = "?";
3282 const char *in_codec_name = "?";
3283 const char *encoder_name = "?";
3284 const char *out_codec_name = "?";
3285 const AVCodecDescriptor *desc;
3288 decoder_name = in_codec->name;
3289 desc = avcodec_descriptor_get(in_codec->id);
3291 in_codec_name = desc->name;
3292 if (!strcmp(decoder_name, in_codec_name))
3293 decoder_name = "native";
3297 encoder_name = out_codec->name;
3298 desc = avcodec_descriptor_get(out_codec->id);
3300 out_codec_name = desc->name;
3301 if (!strcmp(encoder_name, out_codec_name))
3302 encoder_name = "native";
3305 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3306 in_codec_name, decoder_name,
3307 out_codec_name, encoder_name);
3309 av_log(NULL, AV_LOG_INFO, "\n");
3313 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3317 if (sdp_filename || want_sdp) {
3321 transcode_init_done = 1;
3326 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3327 static int need_output(void)
3331 for (i = 0; i < nb_output_streams; i++) {
3332 OutputStream *ost = output_streams[i];
3333 OutputFile *of = output_files[ost->file_index];
3334 AVFormatContext *os = output_files[ost->file_index]->ctx;
3336 if (ost->finished ||
3337 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3339 if (ost->frame_number >= ost->max_frames) {
3341 for (j = 0; j < of->ctx->nb_streams; j++)
3342 close_output_stream(output_streams[of->ost_index + j]);
3353 * Select the output stream to process.
3355 * @return selected output stream, or NULL if none available
3357 static OutputStream *choose_output(void)
3360 int64_t opts_min = INT64_MAX;
3361 OutputStream *ost_min = NULL;
3363 for (i = 0; i < nb_output_streams; i++) {
3364 OutputStream *ost = output_streams[i];
3365 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3367 if (!ost->finished && opts < opts_min) {
3369 ost_min = ost->unavailable ? NULL : ost;
/*
 * check_keyboard_interaction(): poll for interactive single-key commands
 * (at most every 100ms, never in daemon mode).
 * Keys handled in the visible code: q/ESC-like abort via AVERROR_EXIT,
 * +/- change log verbosity, s toggles the QP histogram, c/C send or queue
 * a command to matching filters, d/D cycle codec debug flags, and an
 * unrecognized key prints the help text.
 * Returns AVERROR_EXIT to stop transcoding, 0 to continue.
 *
 * NOTE(review): this extract is garbled — embedded line numbers, lost
 * indentation and elided lines (key reads, braces, return paths). The code
 * below is kept byte-identical; restore it from the pristine tree.
 */
3375 static int check_keyboard_interaction(int64_t cur_time)
3378 static int64_t last_time;
3379 if (received_nb_signals)
3380 return AVERROR_EXIT;
3381 /* read_key() returns 0 on EOF */
/* rate-limit polling to once per 100ms; daemons never read the tty */
3382 if(cur_time - last_time >= 100000 && !run_as_daemon){
3384 last_time = cur_time;
3388 return AVERROR_EXIT;
3389 if (key == '+') av_log_set_level(av_log_get_level()+10);
3390 if (key == '-') av_log_set_level(av_log_get_level()-10);
3391 if (key == 's') qp_hist ^= 1;
3394 do_hex_dump = do_pkt_dump = 0;
3395 } else if(do_pkt_dump){
3399 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line from the tty and dispatch it;
 * 'c' targets the first matching filter, 'C' all of them */
3401 if (key == 'c' || key == 'C'){
3402 char buf[4096], target[64], command[256], arg[256] = {0};
3405 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3407 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3412 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3413 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3414 target, time, command, arg);
3415 for (i = 0; i < nb_filtergraphs; i++) {
3416 FilterGraph *fg = filtergraphs[i];
3419 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3420 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3421 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3422 } else if (key == 'c') {
3423 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3424 ret = AVERROR_PATCHWELCOME;
3426 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3428 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3433 av_log(NULL, AV_LOG_ERROR,
3434 "Parse error, at least 3 arguments were expected, "
3435 "only %d given in string '%s'\n", n, buf);
/* 'D': cycle through codec debug modes; 'd': prompt for a numeric value */
3438 if (key == 'd' || key == 'D'){
3441 debug = input_streams[0]->st->codec->debug<<1;
3442 if(!debug) debug = 1;
3443 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3449 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3453 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3454 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every open codec context */
3456 for(i=0;i<nb_input_streams;i++) {
3457 input_streams[i]->st->codec->debug = debug;
3459 for(i=0;i<nb_output_streams;i++) {
3460 OutputStream *ost = output_streams[i];
3461 ost->enc_ctx->debug = debug;
3463 if(debug) av_log_set_level(AV_LOG_DEBUG);
3464 fprintf(stderr,"debug=%d\n", debug);
/* any other key: print the interactive help */
3467 fprintf(stderr, "key function\n"
3468 "? show this help\n"
3469 "+ increase verbosity\n"
3470 "- decrease verbosity\n"
3471 "c Send command to first matching filter supporting it\n"
3472 "C Send/Que command to all matching filters\n"
3473 "D cycle through available debug modes\n"
3474 "h dump packets/hex press to cycle through the 3 states\n"
3476 "s Show QP histogram\n"
/* Demuxer thread body (one per InputFile when multiple inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. arg is the InputFile*. */
3483 static void *input_thread(void *arg)
3486     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3491         ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer: back off and retry (the av_usleep/continue lines
 * are missing from this sampled listing — confirm in the full source). */
3493         if (ret == AVERROR(EAGAIN)) {
/* Any other error (including EOF) is propagated to the receiving side. */
3498             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate packet data so it survives after the demuxer recycles its
 * buffers (pre-refcounted-AVPacket API). */
3501         av_dup_packet(&pkt);
3502         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* In non-blocking mode a full queue first gets one blocking retry plus a
 * diagnostic suggesting a larger -thread_queue_size. */
3503         if (flags && ret == AVERROR(EAGAIN)) {
3505             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3506             av_log(f->ctx, AV_LOG_WARNING,
3507                    "Thread message queue blocking; consider raising the "
3508                    "thread_queue_size option (current value: %d)\n",
3509                    f->thread_queue_size);
/* Send failure: log unless it is plain EOF, drop the packet, and tell the
 * receiver why the stream ended. */
3512             if (ret != AVERROR_EOF)
3513                 av_log(f->ctx, AV_LOG_ERROR,
3514                        "Unable to send packet to main thread: %s\n",
3516             av_free_packet(&pkt);
3517             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down all demuxer threads: signal EOF to the senders, drain and free
 * any queued packets, join each thread, then free the queues. */
3525 static void free_input_threads(void)
3529     for (i = 0; i < nb_input_files; i++) {
3530         InputFile *f = input_files[i];
/* Skip files that never had a thread queue (e.g. single-input runs). */
3533         if (!f || !f->in_thread_queue)
/* Make the sending thread's next send fail so it exits its loop. */
3535         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain packets still in flight; each was dup'ed by input_thread(). */
3536         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3537             av_free_packet(&pkt);
3539         pthread_join(f->thread, NULL);
3541         av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file. With a single
 * input file no threading is needed and this is a no-op (early return,
 * closing lines missing from this sampled listing). Returns 0 or AVERROR. */
3545 static int init_input_threads(void)
3549     if (nb_input_files == 1)
3552     for (i = 0; i < nb_input_files; i++) {
3553         InputFile *f = input_files[i];
/* Inputs that are non-seekable (live sources) — except the lavfi pseudo-
 * demuxer — are read in non-blocking mode so one stalled input cannot
 * block the others. */
3555         if (f->ctx->pb ? !f->ctx->pb->seekable :
3556             strcmp(f->ctx->iformat->name, "lavfi"))
3557             f->non_blocking = 1;
3558         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3559                                             f->thread_queue_size, sizeof(AVPacket));
3563         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns the error number directly (it does not set errno). */
3564             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3565             av_thread_message_queue_free(&f->in_thread_queue);
3566             return AVERROR(ret);
/* Multi-threaded packet fetch: receive one packet from this input file's
 * demuxer-thread queue. The condition selecting NONBLOCK is on the missing
 * line 3575 — presumably f->non_blocking; confirm in the full source. */
3572 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3574     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3576                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from input file f, either via the per-file demuxer
 * thread (multiple inputs) or directly from av_read_frame(). */
3580 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Native-frame-rate throttling (the guarding condition, presumably f->rate_emu,
 * is on a missing line): compare each stream's dts (in microseconds) against
 * wall-clock time elapsed since the stream started and report EAGAIN while
 * the input is ahead of real time. */
3584     for (i = 0; i < f->nb_streams; i++) {
3585         InputStream *ist = input_streams[f->ist_index + i];
3586         int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3587         int64_t now = av_gettime_relative() - ist->start;
3589             return AVERROR(EAGAIN);
3594     if (nb_input_files > 1)
3595         return get_input_packet_mt(f, pkt);
3597     return av_read_frame(f->ctx, pkt);
/* Return non-zero if any output stream is currently marked unavailable
 * (i.e. its input reported EAGAIN). Return statements are on lines missing
 * from this sampled listing. */
3600 static int got_eagain(void)
3603     for (i = 0; i < nb_output_streams; i++)
3604         if (output_streams[i]->unavailable)
/* Clear the transient EAGAIN state on every input file and every output
 * stream so the next scheduling pass starts fresh. */
3609 static void reset_eagain(void)
3612     for (i = 0; i < nb_input_files; i++)
3613         input_files[i]->eagain = 0;
3614     for (i = 0; i < nb_output_streams; i++)
3615         output_streams[i]->unavailable = 0;
3620  * - 0 -- one packet was read and processed
3621  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3622  *   this function should be called again
3623  * - AVERROR_EOF -- this function should not be called again
/* NOTE(review): sampled listing — many interior lines are missing throughout
 * this function; comments below describe only the visible code. */
3625 static int process_input(int file_index)
3627     InputFile *ifile = input_files[file_index];
3628     AVFormatContext *is;
3634     ret = get_input_packet(ifile, &pkt);
/* EAGAIN: record it on the file (flag-setting line missing) and bail out. */
3636     if (ret == AVERROR(EAGAIN)) {
/* Real error (not EOF): report it against the input filename. */
3641         if (ret != AVERROR_EOF) {
3642             print_error(is->filename, ret);
/* --- EOF path: flush every decoder of this file with a NULL packet, and
 * finish all output streams fed by it that bypass lavfi. --- */
3647         for (i = 0; i < ifile->nb_streams; i++) {
3648             ist = input_streams[ifile->ist_index + i];
3649             if (ist->decoding_needed) {
3650                 ret = process_input_packet(ist, NULL);
3655             /* mark all outputs that don't go through lavfi as finished */
3656             for (j = 0; j < nb_output_streams; j++) {
3657                 OutputStream *ost = output_streams[j];
3659                 if (ost->source_index == ifile->ist_index + i &&
3660                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3661                     finish_output_stream(ost);
/* EOF is reported to the caller as EAGAIN once, with eof_reached latched. */
3665         ifile->eof_reached = 1;
3666         return AVERROR(EAGAIN);
3672         av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3673                          is->streams[pkt.stream_index]);
3675     /* the following test is needed in case new streams appear
3676        dynamically in stream : we ignore them */
3677     if (pkt.stream_index >= ifile->nb_streams) {
3678         report_new_stream(file_index, &pkt);
3679         goto discard_packet;
3682     ist = input_streams[ifile->ist_index + pkt.stream_index];
3684     ist->data_size += pkt.size;
3688         goto discard_packet;
/* Verbose demuxer-side timestamp trace (guard line missing — presumably
 * the debug_ts flag). */
3691         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3692                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3693                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3694                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3695                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3696                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3697                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3698                av_ts2str(input_files[ist->file_index]->ts_offset),
3699                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* --- Timestamp wrap correction for streams whose pts counter has fewer
 * than 64 bits. --- */
3702     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3703         int64_t stime, stime2;
3704         // Correcting starttime based on the enabled streams
3705         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3706         //       so we instead do it here as part of discontinuity handling
3707         if (   ist->next_dts == AV_NOPTS_VALUE
3708             && ifile->ts_offset == -is->start_time
3709             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3710             int64_t new_start_time = INT64_MAX;
3711             for (i=0; i<is->nb_streams; i++) {
3712                 AVStream *st = is->streams[i];
3713                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
/* Recompute the file start time from the earliest non-discarded stream. */
3715                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3717             if (new_start_time > is->start_time) {
3718                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3719                 ifile->ts_offset = -new_start_time;
3723         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3724         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3725         ist->wrap_correction_done = 1;
/* Timestamps more than half a wrap period past the start are unwrapped;
 * correction stays pending (done=0) until both dts and pts are clean. */
3727         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3728             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3729             ist->wrap_correction_done = 0;
3731         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3732             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3733             ist->wrap_correction_done = 0;
3737     /* add the stream-global side data to the first packet */
3738     if (ist->nb_packets == 1) {
3739         if (ist->st->nb_side_data)
3740             av_packet_split_side_data(&pkt);
3741         for (i = 0; i < ist->st->nb_side_data; i++) {
3742             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries. */
3745             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display-matrix side data is skipped when autorotation handles it. */
3747             if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3750             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3754             memcpy(dst_data, src_sd->data, src_sd->size);
/* --- Apply the per-file timestamp offset, then the user ts_scale. --- */
3758     if (pkt.dts != AV_NOPTS_VALUE)
3759         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3760     if (pkt.pts != AV_NOPTS_VALUE)
3761         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3763     if (pkt.pts != AV_NOPTS_VALUE)
3764         pkt.pts *= ist->ts_scale;
3765     if (pkt.dts != AV_NOPTS_VALUE)
3766         pkt.dts *= ist->ts_scale;
/* --- Inter-stream discontinuity: first packet of an A/V stream in a
 * TS_DISCONT format, compared against the file's last seen timestamp. --- */
3768     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3769          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3770         pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3771         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3772         int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3773         int64_t delta   = pkt_dts - ifile->last_ts;
3774         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3775             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
/* Fold the jump into ts_offset and rewrite this packet's timestamps. */
3776             ifile->ts_offset -= delta;
3777             av_log(NULL, AV_LOG_DEBUG,
3778                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3779                    delta, ifile->ts_offset);
3780             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3781             if (pkt.pts != AV_NOPTS_VALUE)
3782                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* --- Intra-stream discontinuity: packet dts vs the stream's predicted
 * next_dts (guard's final condition on missing line 3789 — presumably
 * !copy_ts; confirm in the full source). --- */
3786     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3787          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3788          pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3790         int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3791         int64_t delta   = pkt_dts - ist->next_dts;
3792         if (is->iformat->flags & AVFMT_TS_DISCONT) {
3793             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3794                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
3795                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3796                 ifile->ts_offset -= delta;
3797                 av_log(NULL, AV_LOG_DEBUG,
3798                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3799                        delta, ifile->ts_offset);
3800                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3801                 if (pkt.pts != AV_NOPTS_VALUE)
3802                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: out-of-range timestamps are dropped, not shifted. */
3805             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3806                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3807                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3808                 pkt.dts = AV_NOPTS_VALUE;
3810             if (pkt.pts != AV_NOPTS_VALUE){
3811                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3812                 delta   = pkt_pts - ist->next_dts;
3813                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3814                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3815                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3816                     pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for the inter-stream check above. */
3822     if (pkt.dts != AV_NOPTS_VALUE)
3823         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3826         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3827                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3828                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3829                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3830                av_ts2str(input_files[ist->file_index]->ts_offset),
3831                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep subtitle->video rendering in sync, then hand the packet to decoding
 * or streamcopy. */
3834     sub2video_heartbeat(ist, pkt.pts);
3836     process_input_packet(ist, &pkt);
3839     av_free_packet(&pkt);
3845  * Perform a step of transcoding for the specified filter graph.
3847  * @param[in]  graph     filter graph to consider
3848  * @param[out] best_ist  input stream where a frame would allow to continue
3849  * @return  0 for success, <0 for error
3851 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3854     int nb_requests, nb_requests_max = 0;
3855     InputFilter *ifilter;
/* Ask the graph to produce output from its oldest-queued input. */
3859     ret = avfilter_graph_request_oldest(graph->graph);
/* Success: drain whatever the graph produced. */
3861         return reap_filters(0);
/* EOF: final flush, then close every output stream fed by this graph. */
3863     if (ret == AVERROR_EOF) {
3864         ret = reap_filters(1);
3865         for (i = 0; i < graph->nb_outputs; i++)
3866             close_output_stream(graph->outputs[i]->ost);
/* Anything but EAGAIN at this point is a real error. */
3869     if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc has the most failed requests —
 * feeding it is most likely to unblock the graph. */
3872     for (i = 0; i < graph->nb_inputs; i++) {
3873         ifilter = graph->inputs[i];
/* Inputs that are themselves stalled or exhausted cannot help. */
3875         if (input_files[ist->file_index]->eagain ||
3876             input_files[ist->file_index]->eof_reached)
3878         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3879         if (nb_requests > nb_requests_max) {
3880             nb_requests_max = nb_requests;
/* No feedable input found (guard line missing): mark the graph's outputs
 * unavailable for this scheduling round. */
3886         for (i = 0; i < graph->nb_outputs; i++)
3887             graph->outputs[i]->ost->unavailable = 1;
3893  * Run a single step of transcoding.
3895  * @return  0 for success, <0 for error
3897 static int transcode_step(void)
/* Pick the output stream with the smallest timestamp to keep outputs
 * progressing evenly. */
3903     ost = choose_output();
/* No selectable output (intervening logic missing from this sampled
 * listing): nothing left to read. */
3910             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered streams: let the filter graph decide which input to feed. */
3915         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered streams read straight from their source input stream. */
3920         av_assert0(ost->source_index >= 0);
3921         ist = input_streams[ost->source_index];
3924     ret = process_input(ist->file_index);
3925     if (ret == AVERROR(EAGAIN)) {
3926         if (input_files[ist->file_index]->eagain)
3927             ost->unavailable = 1;
/* EOF from the input is not an error at this level. */
3932         return ret == AVERROR_EOF ? 0 : ret;
3934     return reap_filters(0);
3938  * The following code is the main loop of the file converter
3940 static int transcode(void)
3943     AVFormatContext *os;
3946     int64_t timer_start;
3948     ret = transcode_init();
3952     if (stdin_interaction) {
3953         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3956     timer_start = av_gettime_relative();
/* Guarded by HAVE_PTHREADS in the full source (guard lines missing here). */
3959     if ((ret = init_input_threads()) < 0)
/* --- Main loop: runs until a termination signal or no work remains. --- */
3963     while (!received_sigterm) {
3964         int64_t cur_time= av_gettime_relative();
3966         /* if 'q' pressed, exits */
3967         if (stdin_interaction)
3968             if (check_keyboard_interaction(cur_time) < 0)
3971         /* check if there's any stream where output is still needed */
3972         if (!need_output()) {
3973             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3977         ret = transcode_step();
/* EOF/EAGAIN from a step is recoverable; other errors abort with a message. */
3979         if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3983                 av_strerror(ret, errbuf, sizeof(errbuf));
3985                 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3990         /* dump report by using the output first video and audio streams */
3991         print_report(0, timer_start, cur_time);
3994     free_input_threads();
3997     /* at the end of stream, we must flush the decoder buffers */
3998     for (i = 0; i < nb_input_streams; i++) {
3999         ist = input_streams[i];
4000         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4001             process_input_packet(ist, NULL);
4008     /* write the trailer if needed and close file */
4009     for (i = 0; i < nb_output_files; i++) {
4010         os = output_files[i]->ctx;
4011         av_write_trailer(os);
4014     /* dump report by using the first video and audio streams */
4015     print_report(1, timer_start, av_gettime_relative());
4017     /* close each encoder */
4018     for (i = 0; i < nb_output_streams; i++) {
4019         ost = output_streams[i];
4020         if (ost->encoding_needed) {
4021             av_freep(&ost->enc_ctx->stats_in);
4025     /* close each decoder */
4026     for (i = 0; i < nb_input_streams; i++) {
4027         ist = input_streams[i];
4028         if (ist->decoding_needed) {
4029             avcodec_close(ist->dec_ctx);
4030             if (ist->hwaccel_uninit)
4031                 ist->hwaccel_uninit(ist->dec_ctx);
/* Error-path cleanup label lives on a missing line; the second
 * free_input_threads() call below is safe because the queues were already
 * freed and the function skips NULL queues. */
4040     free_input_threads();
4043     if (output_streams) {
4044         for (i = 0; i < nb_output_streams; i++) {
4045             ost = output_streams[i];
/* Per-stream teardown: pass log file, keyframe list, options dicts. */
4048                     fclose(ost->logfile);
4049                     ost->logfile = NULL;
4051                 av_freep(&ost->forced_kf_pts);
4052                 av_freep(&ost->apad);
4053                 av_freep(&ost->disposition);
4054                 av_dict_free(&ost->encoder_opts);
4055                 av_dict_free(&ost->sws_dict);
4056                 av_dict_free(&ost->swr_opts);
4057                 av_dict_free(&ost->resample_opts);
4058                 av_dict_free(&ost->bsf_args);
/* Return the process's consumed user CPU time in microseconds, using the
 * best available platform API (getrusage, GetProcessTimes, or wall clock
 * as a last resort). The #if for getrusage is on a missing line. */
4066 static int64_t getutime(void)
4069     struct rusage rusage;
4071     getrusage(RUSAGE_SELF, &rusage);
4072     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4073 #elif HAVE_GETPROCESSTIMES
4075     FILETIME c, e, k, u;
4076     proc = GetCurrentProcess();
4077     GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; dividing by 10 yields microseconds. */
4078     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* Fallback: wall-clock time, not true CPU time. */
4080     return av_gettime_relative();
/* Return the process's peak memory usage in bytes, or a platform-specific
 * approximation (0/fallback lines missing from this sampled listing). */
4084 static int64_t getmaxrss(void)
4086 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4087     struct rusage rusage;
4088     getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; scaled here to bytes. */
4089     return (int64_t)rusage.ru_maxrss * 1024;
4090 #elif HAVE_GETPROCESSMEMORYINFO
4092     PROCESS_MEMORY_COUNTERS memcounters;
4093     proc = GetCurrentProcess();
4094     memcounters.cb = sizeof(memcounters);
4095     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* NOTE(review): returns PeakPagefileUsage (committed memory), which is not
 * strictly RSS — intentional approximation on Windows. */
4096     return memcounters.PeakPagefileUsage;
/* No-op log callback, installed in main() for "-d" (daemon) mode to silence
 * all library logging. Body (empty) is on lines missing from this listing. */
4102 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: global library registration, option parsing, one
 * transcode() run, then benchmark/error-rate reporting and exit. */
4106 int main(int argc, char **argv)
/* All exit paths funnel through ffmpeg_cleanup via register_exit(). */
4111     register_exit(ffmpeg_cleanup);
4113     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4115     av_log_set_flags(AV_LOG_SKIP_REPEATED);
4116     parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode — logging is fully muted. */
4118     if(argc>1 && !strcmp(argv[1], "-d")){
4120         av_log_set_callback(log_callback_null);
/* Register all codecs/devices/filters and init networking (pre-4.0 API). */
4125     avcodec_register_all();
4127     avdevice_register_all();
4129     avfilter_register_all();
4131     avformat_network_init();
4133     show_banner(argc, argv, options);
4137     /* parse options and open all input/output files */
4138     ret = ffmpeg_parse_options(argc, argv);
4142     if (nb_output_files <= 0 && nb_input_files == 0) {
4144         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4148     /* file converter / grab */
4149     if (nb_output_files <= 0) {
4150         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Inputs are optional (e.g. lavfi-only graphs) — check left disabled. */
4154 //     if (nb_input_files == 0) {
4155 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode run for the -benchmark report. */
4159     current_time = ti = getutime();
4160     if (transcode() < 0)
4162     ti = getutime() - ti;
4164         av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4166     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4167            decode_error_stat[0], decode_error_stat[1]);
/* Fail the run if the decode-error fraction exceeds -max_error_rate. */
4168     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by the user (Ctrl-C etc.). */
4171     exit_program(received_nb_signals ? 255 : main_return_code);
4172     return main_return_code;