2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avcodec.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils for the version/banner output. */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Log file for per-frame video statistics; opened lazily by do_video_stats(). */
115 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions.
 * NOTE(review): the initializer list is truncated in this excerpt. */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
/* Runtime state and counters for the transcode loop. */
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated on first use
 * in do_subtitle_out() and released in ffmpeg_cleanup(). */
138 static uint8_t *subtitle_out;
/* Global tables of input/output files and streams. Non-static, so
 * presumably shared with the other ffmpeg_* translation units — confirm. */
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe() when
 * restore_tty is set. */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank (all-zero, i.e. fully transparent)
 * RGB32 canvas onto which subtitle rectangles can be blitted.
 * Returns 0 on success, <0 on allocation failure.
 * NOTE(review): several lines of this body are missing from this excerpt. */
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
/* Prefer the decoder-reported dimensions; fall back to the pre-computed
 * sub2video canvas size when the decoder does not report one. */
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* align=32 so downstream filter code can use aligned accesses. */
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* All-zero RGB32 means alpha == 0: a fully transparent canvas. */
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted (PAL8) subtitle rectangle through its palette into an
 * RGB32 canvas of size w x h with stride dst_linesize.  Non-bitmap rects
 * and rects that do not fit inside the canvas are rejected with a warning.
 * NOTE(review): body lines are missing from this excerpt. */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the rectangle must lie entirely inside the canvas. */
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
202 dst += r->y * dst_linesize + r->x * 4;
203 src = r->pict.data[0];
204 pal = (uint32_t *)r->pict.data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
/* Palette lookup: each source byte indexes a 32-bit RGBA entry. */
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->pict.linesize[0];
/* Send the current sub2video canvas at the given pts into every filter
 * this input stream feeds.  KEEP_REF keeps ownership of the frame here so
 * the same canvas can be re-sent by later heartbeats. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
/* The canvas must already have an allocated buffer at this point. */
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into
 * the filtergraph.  With sub == NULL (heartbeat/flush path, see the
 * truncated branch around line 245) an empty canvas is pushed instead.
 * NOTE(review): body lines are missing from this excerpt. */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* display times are in ms; rescale the start/end into the stream tb. */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
/* sub == NULL path: reuse the previous end_pts as the canvas pts. */
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
/* Blit every rectangle of the subtitle onto the canvas. */
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams of the same input file "alive": re-push the last
 * subtitle canvas (or a blank one) so video filters waiting on a subtitle
 * input do not stall while frames accumulate.
 * NOTE(review): body lines are missing from this excerpt. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that have a sub2video canvas participate. */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's end (or no canvas yet): push a blank. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
/* Only re-push if some filter actually failed a frame request. */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: push one final blank
 * canvas if a subtitle is still displayed, then send NULL (EOF) into
 * every connected buffer source. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put back the tty attributes
 * saved by term_init().  Called from signal handlers, so it must avoid
 * non-signal-safe calls.  NOTE(review): the restore_tty guard around the
 * tcsetattr call is missing from this excerpt. */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/shutdown state shared between the signal handlers and the main
 * loop; volatile because it is written from async signal context. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
/* Set once transcode initialization completes; also read by the AVIO
 * interrupt callback. */
319 static volatile int transcode_init_done = 0;
/* Set when main() is done; the Windows console handler spins on this. */
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* POSIX signal handler: record the signal and count repeats; after more
 * than 3 signals, hard-exit (write() is async-signal-safe, printf is not).
 * NOTE(review): the function's return-type line and exit call are missing
 * from this excerpt. */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the same
 * sigterm_handler path used for POSIX signals.
 * NOTE(review): switch statement and returns are missing from this excerpt. */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
/* Close/logoff/shutdown behave like SIGTERM, then stall below. */
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this is the interior of term_init(); its signature and
 * several lines are missing from this excerpt.  It puts the controlling
 * tty into raw-ish mode for interactive key handling and installs the
 * termination signal handlers. */
376 istty = isatty(0) && isatty(2);
/* Save current attrs (into oldtty, per the truncated lines) and disable
 * canonical mode/echo so single keypresses are readable. */
378 if (istty && tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU time limit exceeded — treat like a termination request. */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* Returns the next pressed key, or a negative/sentinel value when none is
 * available.  POSIX path polls stdin with select(); the Windows path uses
 * PeekNamedPipe/console APIs.  NOTE(review): most of the body, including
 * the actual read and returns, is missing from this excerpt. */
408 static int read_key(void)
/* Zero-timeout select on fd 0: check readability without blocking. */
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so use it to detect redirected stdin. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > transcode_init_done;
/* Interrupt callback handed to libavformat so signals can abort I/O. */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global transcode state: filtergraphs, output files and
 * streams, input files and streams, then the shared tables themselves.
 * ret selects the final log message.  Registered as the exit handler
 * (per the function name); NOTE(review): many lines, including closing
 * braces and the ffmpeg_exited update, are missing from this excerpt. */
466 static void ffmpeg_cleanup(int ret)
/* -benchmark reporting: peak resident set size in kB. */
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free every filtergraph and its per-input/per-output descriptors. */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 av_freep(&fg->inputs[j]->name);
480 av_freep(&fg->inputs[j]);
482 av_freep(&fg->inputs);
483 for (j = 0; j < fg->nb_outputs; j++) {
484 av_freep(&fg->outputs[j]->name);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
/* Close the output files (the trailer write/file close lines are
 * truncated here) and free their format contexts and options. */
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
/* Free per-output-stream resources: bitstream filter chain, cached
 * frames, parser, keyframe-forcing state, encoder context. */
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
512 AVBitStreamFilterContext *bsfc;
517 bsfc = ost->bitstream_filters;
519 AVBitStreamFilterContext *next = bsfc->next;
520 av_bitstream_filter_close(bsfc);
523 ost->bitstream_filters = NULL;
524 av_frame_free(&ost->filtered_frame);
525 av_frame_free(&ost->last_frame);
527 av_parser_close(ost->parser);
529 av_freep(&ost->forced_keyframes);
530 av_expr_free(ost->forced_keyframes_pexpr);
531 av_freep(&ost->avfilter);
532 av_freep(&ost->logfile_prefix);
534 av_freep(&ost->audio_channels_map);
535 ost->audio_channels_mapped = 0;
537 avcodec_free_context(&ost->enc_ctx);
539 av_freep(&output_streams[i]);
/* Input side: stop reader threads before closing their files. */
542 free_input_threads();
544 for (i = 0; i < nb_input_files; i++) {
545 avformat_close_input(&input_files[i]->ctx);
546 av_freep(&input_files[i]);
548 for (i = 0; i < nb_input_streams; i++) {
549 InputStream *ist = input_streams[i];
551 av_frame_free(&ist->decoded_frame);
552 av_frame_free(&ist->filter_frame);
553 av_dict_free(&ist->decoder_opts);
554 avsubtitle_free(&ist->prev_sub.subtitle);
555 av_frame_free(&ist->sub2video.frame);
556 av_freep(&ist->filters);
557 av_freep(&ist->hwaccel_device);
559 avcodec_free_context(&ist->dec_ctx);
561 av_freep(&input_streams[i]);
/* Finally drop the global tables themselves. */
566 av_freep(&vstats_filename);
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
/* Final status line: a signal exit is "normal"; a nonzero ret after
 * successful init means the conversion itself failed. */
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip already-consumed options before the leftover check in
 * assert_avoptions(). */
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * component it was passed to.  NOTE(review): the exit call after the
 * fatal log is missing from this excerpt. */
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper for experimental codecs requested without the required
 * opt-in.  NOTE(review): the body is missing from this excerpt. */
605 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when enabled, log the user time elapsed since
 * the previous call, labelled by the printf-style fmt.  Called with
 * fmt == NULL just to reset the timestamp.
 * NOTE(review): va_start/va_end and the buffer declaration are missing
 * from this excerpt. */
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: ost itself gets this_stream, all
 * other streams get others.  Used on fatal mux errors to stop the whole
 * output. */
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final packet path to the muxer: copy extradata for streamcopy, apply
 * bitstream filters, sanitize pts/dts monotonicity, update counters and
 * hand the packet to av_interleaved_write_frame().
 * NOTE(review): many lines (braces, some assignments, debug guards) are
 * missing from this excerpt. */
636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Streamcopy uses the muxer-side codec context; encoding uses enc_ctx. */
639 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate encoder extradata to the muxer stream if not set yet. */
642 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
643 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
644 if (ost->st->codec->extradata) {
645 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
646 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: discard timestamps entirely. */
650 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
651 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
652 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
655 * Audio encoders may split the packets -- #frames in != #packets out.
656 * But there is no reordering, so we can limit the number of output packets
657 * by simply dropping them here.
658 * Counting encoded video frames needs to be done separately because of
659 * reordering, see do_video_out()
661 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662 if (ost->frame_number >= ost->max_frames) {
/* Pull per-frame quality/PSNR stats out of packet side data. */
668 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
670 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
672 ost->quality = sd ? AV_RL32(sd) : -1;
673 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
675 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
677 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* Run the packet through each configured bitstream filter in turn. */
684 av_packet_split_side_data(pkt);
687 AVPacket new_pkt = *pkt;
688 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
691 int a = av_bitstream_filter_filter(bsfc, avctx,
692 bsf_arg ? bsf_arg->value : NULL,
693 &new_pkt.data, &new_pkt.size,
694 pkt->data, pkt->size,
695 pkt->flags & AV_PKT_FLAG_KEY);
/* Re-allocate the filtered data with padding so downstream code can
 * rely on AV_INPUT_BUFFER_PADDING_SIZE zero bytes. */
696 if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
697 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
699 memcpy(t, new_pkt.data, new_pkt.size);
700 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
708 pkt->side_data = NULL;
709 pkt->side_data_elems = 0;
/* Wrap the filtered buffer so it is refcounted like a normal packet. */
711 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
712 av_buffer_default_free, NULL, 0);
717 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
718 bsfc->filter->name, pkt->stream_index,
719 avctx->codec ? avctx->codec->name : "copy");
/* Timestamp sanitation (skipped for muxers that ignore timestamps). */
729 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
730 if (pkt->dts != AV_NOPTS_VALUE &&
731 pkt->pts != AV_NOPTS_VALUE &&
732 pkt->dts > pkt->pts) {
733 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
735 ost->file_index, ost->st->index);
/* Replace dts with the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
737 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
738 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
739 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing dts per stream. */
742 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
743 pkt->dts != AV_NOPTS_VALUE &&
744 ost->last_mux_dts != AV_NOPTS_VALUE) {
745 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
746 if (pkt->dts < max) {
747 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
748 av_log(s, loglevel, "Non-monotonous DTS in output stream "
749 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
750 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
752 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
755 av_log(s, loglevel, "changing to %"PRId64". This may result "
756 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to max. */
758 if(pkt->pts >= pkt->dts)
759 pkt->pts = FFMAX(pkt->pts, max);
764 ost->last_mux_dts = pkt->dts;
766 ost->data_size += pkt->size;
767 ost->packets_written++;
769 pkt->stream_index = ost->index;
/* Debug trace of everything handed to the muxer. */
772 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
773 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
774 av_get_media_type_string(ost->enc_ctx->codec_type),
775 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
776 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
781 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure: record the error and shut down all output streams. */
783 print_error("av_interleaved_write_frame()", ret);
784 main_return_code = 1;
785 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark one output stream's encoder finished and shrink the owning file's
 * recording_time so the remaining streams stop at the same point.
 * NOTE(review): the guard around the recording_time update is missing
 * from this excerpt. */
790 static void close_output_stream(OutputStream *ost)
792 OutputFile *of = output_files[ost->file_index];
794 ost->finished |= ENCODER_FINISHED;
796 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
797 of->recording_time = FFMIN(of->recording_time, end);
/* Returns whether this stream may still emit frames under the output
 * file's -t limit; closes the stream once the limit is reached.
 * NOTE(review): the return statements are missing from this excerpt. */
801 static int check_recording_time(OutputStream *ost)
803 OutputFile *of = output_files[ost->file_index];
/* Compare elapsed encoded time (in enc time_base) against the limit. */
805 if (of->recording_time != INT64_MAX &&
806 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
807 AV_TIME_BASE_Q) >= 0) {
808 close_output_stream(ost);
/* Encode one audio frame and mux any resulting packet.  Maintains
 * ost->sync_opts as a running sample counter used for pts generation
 * and -t accounting.  NOTE(review): parameter list continuation, locals
 * and the got_packet guard are missing from this excerpt. */
814 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
817 AVCodecContext *enc = ost->enc_ctx;
821 av_init_packet(&pkt);
825 if (!check_recording_time(ost))
/* Without usable pts (or with negative -async), stamp frames from the
 * running sample count. */
828 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
829 frame->pts = ost->sync_opts;
830 ost->sync_opts = frame->pts + frame->nb_samples;
831 ost->samples_encoded += frame->nb_samples;
832 ost->frames_encoded++;
834 av_assert0(pkt.size || !pkt.data);
835 update_benchmark(NULL);
/* Debug trace of the frame entering the encoder. */
837 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
838 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
839 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
840 enc->time_base.num, enc->time_base.den);
843 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
844 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
847 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Rescale the packet from encoder tb to the muxer stream tb. */
850 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
853 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
854 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
855 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
856 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
859 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles
 * are emitted twice (draw packet + clear packet); text/other codecs once.
 * NOTE(review): parameter list continuation, locals and several guards
 * are missing from this excerpt. */
863 static void do_subtitle_out(AVFormatContext *s,
868 int subtitle_out_max_size = 1024 * 1024;
869 int subtitle_out_size, nb, i;
/* A pts is mandatory: all downstream timing is derived from it. */
874 if (sub->pts == AV_NOPTS_VALUE) {
875 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
884 subtitle_out = av_malloc(subtitle_out_max_size);
886 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
891 /* Note: DVB subtitle need one packet to draw them and one other
892 packet to clear them */
893 /* XXX: signal it in the codec context ? */
894 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
899 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
901 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
902 pts -= output_files[ost->file_index]->start_time;
/* nb is 2 for DVB (draw + clear), otherwise 1 (per the branch above). */
903 for (i = 0; i < nb; i++) {
904 unsigned save_num_rects = sub->num_rects;
906 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
907 if (!check_recording_time(ost))
911 // start_display_time is required to be 0
/* Fold start_display_time into pts and renormalize the display window. */
912 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
913 sub->end_display_time -= sub->start_display_time;
914 sub->start_display_time = 0;
918 ost->frames_encoded++;
920 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
921 subtitle_out_max_size, sub);
/* Restore num_rects (presumably zeroed for the DVB clear pass — the
 * zeroing line is missing from this excerpt). */
923 sub->num_rects = save_num_rects;
924 if (subtitle_out_size < 0) {
925 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Build the output packet around the shared encode buffer. */
929 av_init_packet(&pkt);
930 pkt.data = subtitle_out;
931 pkt.size = subtitle_out_size;
932 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
933 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
934 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
935 /* XXX: the pts correction is handled here. Maybe handling
936 it in the codec would be better */
/* 90 == 90 kHz ticks per millisecond of display time. */
938 pkt.pts += 90 * sub->start_display_time;
940 pkt.pts += 90 * sub->end_display_time;
943 write_frame(s, &pkt, ost);
/* Core video output path: decide, based on the active vsync mode, how
 * many times (nb_frames) to emit the incoming picture — duplicating or
 * dropping frames as needed — then either pass it raw (AVFMT_RAWPICTURE)
 * or encode it, handling forced keyframes and two-pass stats.
 * NOTE(review): a large number of lines (locals, braces, several case
 * labels and guards) are missing from this excerpt; comments below are
 * best-effort and should be checked against the full source. */
947 static void do_video_out(AVFormatContext *s,
949 AVFrame *next_picture,
952 int ret, format_video_sync;
954 AVCodecContext *enc = ost->enc_ctx;
955 AVCodecContext *mux_enc = ost->st->codec;
956 int nb_frames, nb0_frames, i;
957 double delta, delta0;
960 InputStream *ist = NULL;
961 AVFilterContext *filter = ost->filter->filter;
963 if (ost->source_index >= 0)
964 ist = input_streams[ost->source_index];
/* Derive the frame duration (in encoder ticks) from the filter output
 * frame rate, clamped by the forced output rate when one is known. */
966 if (filter->inputs[0]->frame_rate.num > 0 &&
967 filter->inputs[0]->frame_rate.den > 0)
968 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
970 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
971 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Without filters, trust the incoming frame's own pkt_duration. */
973 if (!ost->filters_script &&
977 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
978 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path: predict nb0 from the median of the last three values. */
983 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
984 ost->last_nb0_frames[1],
985 ost->last_nb0_frames[2]);
/* delta0: gap between the frame's ideal pts and the encoder clock;
 * delta additionally accounts for the frame's duration. */
987 delta0 = sync_ipts - ost->sync_opts;
988 delta = delta0 + duration;
990 /* by default, we output a single frame */
/* Resolve -vsync auto into a concrete mode based on the muxer. */
994 format_video_sync = video_sync_method;
995 if (format_video_sync == VSYNC_AUTO) {
996 if(!strcmp(s->oformat->name, "avi")) {
997 format_video_sync = VSYNC_VFR;
999 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1001 && format_video_sync == VSYNC_CFR
1002 && input_files[ist->file_index]->ctx->nb_streams == 1
1003 && input_files[ist->file_index]->input_ts_offset == 0) {
1004 format_video_sync = VSYNC_VSCFR;
1006 if (format_video_sync == VSYNC_CFR && copy_ts) {
1007 format_video_sync = VSYNC_VSCFR;
/* A frame arriving "late" (delta0 < 0) gets its timing nudged forward,
 * within its own duration, before the per-mode decisions below. */
1013 format_video_sync != VSYNC_PASSTHROUGH &&
1014 format_video_sync != VSYNC_DROP) {
1015 double cor = FFMIN(-delta0, duration);
1016 if (delta0 < -0.6) {
1017 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1019 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
/* Per-mode computation of nb_frames / sync_opts. */
1025 switch (format_video_sync) {
1027 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1028 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1031 ost->sync_opts = lrint(sync_ipts);
1034 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: drop when under threshold, duplicate when more than a frame
 * ahead (nb_frames = rounded delta). */
1035 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1037 } else if (delta < -1.1)
1039 else if (delta > 1.1) {
1040 nb_frames = lrintf(delta);
1042 nb0_frames = lrintf(delta0 - 0.6);
/* VFR-style: drop or re-stamp depending on the drift. */
1048 else if (delta > 0.6)
1049 ost->sync_opts = lrint(sync_ipts);
1052 case VSYNC_PASSTHROUGH:
1053 ost->sync_opts = lrint(sync_ipts);
/* Clamp against -frames and record nb0 history for future flushes. */
1060 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1061 nb0_frames = FFMIN(nb0_frames, nb_frames);
1063 memmove(ost->last_nb0_frames + 1,
1064 ost->last_nb0_frames,
1065 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1066 ost->last_nb0_frames[0] = nb0_frames;
/* Drop/dup bookkeeping and user-visible logging. */
1068 if (nb0_frames == 0 && ost->last_droped) {
1070 av_log(NULL, AV_LOG_VERBOSE,
1071 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1072 ost->frame_number, ost->st->index, ost->last_frame->pts);
1074 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1075 if (nb_frames > dts_error_threshold * 30) {
1076 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1080 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1081 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1083 ost->last_droped = nb_frames == nb0_frames && next_picture;
1085 /* duplicates frame if needed */
1086 for (i = 0; i < nb_frames; i++) {
1087 AVFrame *in_picture;
1088 av_init_packet(&pkt);
/* The first nb0_frames iterations re-emit the previous frame. */
1092 if (i < nb0_frames && ost->last_frame) {
1093 in_picture = ost->last_frame;
1095 in_picture = next_picture;
1100 in_picture->pts = ost->sync_opts;
1103 if (!check_recording_time(ost))
1105 if (ost->frame_number >= ost->max_frames)
/* Raw path: hand the AVPicture pointer straight to the muxer. */
1109 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1110 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1111 /* raw pictures are written as AVPicture structure to
1112 avoid any copies. We support temporarily the older
1114 if (in_picture->interlaced_frame)
1115 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1117 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1118 pkt.data = (uint8_t *)in_picture;
1119 pkt.size = sizeof(AVPicture);
1120 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1121 pkt.flags |= AV_PKT_FLAG_KEY;
1123 write_frame(s, &pkt, ost);
/* Encode path. */
1125 int got_packet, forced_keyframe = 0;
/* Honor an explicit -top setting for interlaced encoding. */
1128 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1129 ost->top_field_first >= 0)
1130 in_picture->top_field_first = !!ost->top_field_first;
1132 if (in_picture->interlaced_frame) {
1133 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1134 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1136 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1138 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1140 in_picture->quality = enc->global_quality;
1141 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source". */
1143 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1144 in_picture->pts * av_q2d(enc->time_base) : NAN;
1145 if (ost->forced_kf_index < ost->forced_kf_count &&
1146 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1147 ost->forced_kf_index++;
1148 forced_keyframe = 1;
1149 } else if (ost->forced_keyframes_pexpr) {
1151 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1152 res = av_expr_eval(ost->forced_keyframes_pexpr,
1153 ost->forced_keyframes_expr_const_values, NULL);
1154 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1155 ost->forced_keyframes_expr_const_values[FKF_N],
1156 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1157 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1158 ost->forced_keyframes_expr_const_values[FKF_T],
1159 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1162 forced_keyframe = 1;
1163 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1164 ost->forced_keyframes_expr_const_values[FKF_N];
1165 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1166 ost->forced_keyframes_expr_const_values[FKF_T];
1167 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1170 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1171 } else if ( ost->forced_keyframes
1172 && !strncmp(ost->forced_keyframes, "source", 6)
1173 && in_picture->key_frame==1) {
1174 forced_keyframe = 1;
1177 if (forced_keyframe) {
1178 in_picture->pict_type = AV_PICTURE_TYPE_I;
1179 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1182 update_benchmark(NULL);
1184 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1185 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1186 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1187 enc->time_base.num, enc->time_base.den);
1190 ost->frames_encoded++;
1192 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1193 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1195 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Trace of the packet leaving the encoder (still in encoder tb). */
1201 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1202 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1203 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1204 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Some encoders leave pts unset when they have no delay. */
1207 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1208 pkt.pts = ost->sync_opts;
1210 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1213 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1214 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1215 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1216 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1219 frame_size = pkt.size;
1220 write_frame(s, &pkt, ost);
1222 /* if two pass, output log */
1223 if (ost->logfile && enc->stats_out) {
1224 fprintf(ost->logfile, "%s", enc->stats_out);
1230 * For video, number of frames in == number of packets out.
1231 * But there may be reordering, so we can't throw away frames on encoder
1232 * flush, we need to limit them here, before they go into encoder.
1234 ost->frame_number++;
1236 if (vstats_filename && frame_size)
1237 do_video_stats(ost, frame_size);
/* Keep a reference to the last emitted picture for duplication. */
1240 if (!ost->last_frame)
1241 ost->last_frame = av_frame_alloc();
1242 av_frame_unref(ost->last_frame);
1243 if (next_picture && ost->last_frame)
1244 av_frame_ref(ost->last_frame, next_picture);
1246 av_frame_free(&ost->last_frame);
1249 static double psnr(double d)
1251 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame statistics (frame number, quantizer,
 * optional PSNR, sizes, bitrates, picture type) to the -vstats file,
 * which is opened on the first call.
 * NOTE(review): locals, the enc assignment and error handling are
 * missing from this excerpt. */
1254 static void do_video_stats(OutputStream *ost, int frame_size)
1256 AVCodecContext *enc;
1258 double ti1, bitrate, avg_bitrate;
1260 /* this is executed just the first time do_video_stats is called */
1262 vstats_file = fopen(vstats_filename, "w");
1270 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1271 frame_number = ost->st->nb_frames;
/* quality is stored as lambda; divide by FF_QP2LAMBDA to recover QP. */
1272 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1273 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR needs the per-frame SSE the encoder recorded in ost->error. */
1275 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1276 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1278 fprintf(vstats_file,"f_size= %6d ", frame_size);
1279 /* compute pts value */
1280 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous rate from this frame; average from total bytes so far. */
1284 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1285 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1286 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1287 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1288 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark a stream fully finished (both encoder and muxer); per the loop
 * below this propagates to every stream of the same output file.
 * NOTE(review): the condition guarding that loop is missing from this
 * excerpt. */
1292 static void finish_output_stream(OutputStream *ost)
1294 OutputFile *of = output_files[ost->file_index];
1297 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1300 for (i = 0; i < of->ctx->nb_streams; i++)
1301 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1306 * Get and encode new output from any of the filtergraphs, without causing
1309 * @return 0 for success, <0 for severe errors
/*
 * Drain every output stream's buffersink: pull all frames currently
 * available from the filtergraphs (without requesting new ones) and hand
 * them to do_video_out()/do_audio_out(). With 'flush' set, an EOF from a
 * video sink triggers a final do_video_out(NULL) to flush the fps logic.
 * Returns 0 on success, AVERROR(ENOMEM) if a frame cannot be allocated.
 */
1311 static int reap_filters(int flush)
1313 AVFrame *filtered_frame = NULL;
1316 /* Reap all buffers present in the buffer sinks */
1317 for (i = 0; i < nb_output_streams; i++) {
1318 OutputStream *ost = output_streams[i];
1319 OutputFile *of = output_files[ost->file_index];
1320 AVFilterContext *filter;
1321 AVCodecContext *enc = ost->enc_ctx;
1326 filter = ost->filter->filter;
     /* lazily allocate the reusable per-stream frame */
1328 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1329 return AVERROR(ENOMEM);
1331 filtered_frame = ost->filtered_frame;
     /* NOTE(review): storing AV_NOPTS_VALUE (an int64 sentinel) into a
      * double is intentional here but lossy — kept as-is upstream */
1334 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
     /* NO_REQUEST: only take frames that are already buffered */
1335 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1336 AV_BUFFERSINK_FLAG_NO_REQUEST);
1338 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1339 av_log(NULL, AV_LOG_WARNING,
1340 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1341 } else if (flush && ret == AVERROR_EOF) {
     /* on flush, let the video path emit any frames still owed */
1342 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1343 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1347 if (ost->finished) {
1348 av_frame_unref(filtered_frame);
     /* rescale pts to encoder time base; float_pts gets extra_bits of
      * sub-tick precision for the fps/vsync decision in do_video_out() */
1351 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1352 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1353 AVRational tb = enc->time_base;
1354 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1356 tb.den <<= extra_bits;
1358 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1359 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1360 float_pts /= 1 << extra_bits;
1361 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1362 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1364 filtered_frame->pts =
1365 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1366 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1368 //if (ost->source_index >= 0)
1369 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
     /* dispatch on the media type of the sink's input pad */
1371 switch (filter->inputs[0]->type) {
1372 case AVMEDIA_TYPE_VIDEO:
     /* no user-forced aspect ratio: take it from the filtered frame */
1373 if (!ost->frame_aspect_ratio.num)
1374 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1377 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1378 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1380 enc->time_base.num, enc->time_base.den);
1383 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1385 case AVMEDIA_TYPE_AUDIO:
     /* the encoder cannot follow parameter changes: channel-count drift
      * from the filtergraph is a fatal configuration error */
1386 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1387 enc->channels != av_frame_get_channels(filtered_frame)) {
1388 av_log(NULL, AV_LOG_ERROR,
1389 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1392 do_audio_out(of->ctx, ost, filtered_frame);
1395 // TODO support subtitle filters
1399 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type encoded byte totals with muxing
 * overhead, then (at verbose level) per-input and per-output stream packet
 * and byte counts. 'total_size' is the final muxed output size in bytes.
 */
1406 static void print_final_stats(int64_t total_size)
1408 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1409 uint64_t subtitle_size = 0;
1410 uint64_t data_size = 0;
1411 float percent = -1.0;
     /* accumulate encoded payload size per media type */
1415 for (i = 0; i < nb_output_streams; i++) {
1416 OutputStream *ost = output_streams[i];
1417 switch (ost->enc_ctx->codec_type) {
1418 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1419 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1420 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1421 default: other_size += ost->data_size; break;
     /* global headers (extradata) are counted separately as overhead */
1423 extra_size += ost->enc_ctx->extradata_size;
1424 data_size += ost->data_size;
     /* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with the deprecated
      * CODEC_FLAG_PASS2 name — presumably should be AV_CODEC_FLAG_PASS2;
      * confirm against the libavcodec version in use */
1425 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1426 != AV_CODEC_FLAG_PASS1)
     /* muxing overhead: container bytes beyond the raw encoded payload */
1430 if (data_size && total_size>0 && total_size >= data_size)
1431 percent = 100.0 * (total_size - data_size) / data_size;
1433 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1434 video_size / 1024.0,
1435 audio_size / 1024.0,
1436 subtitle_size / 1024.0,
1437 other_size / 1024.0,
1438 extra_size / 1024.0);
1440 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1442 av_log(NULL, AV_LOG_INFO, "unknown");
1443 av_log(NULL, AV_LOG_INFO, "\n");
1445 /* print verbose per-stream stats */
1446 for (i = 0; i < nb_input_files; i++) {
1447 InputFile *f = input_files[i];
     /* note: this 'total_size' shadows the function parameter on purpose */
1448 uint64_t total_packets = 0, total_size = 0;
1450 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1451 i, f->ctx->filename);
1453 for (j = 0; j < f->nb_streams; j++) {
1454 InputStream *ist = input_streams[f->ist_index + j];
1455 enum AVMediaType type = ist->dec_ctx->codec_type;
1457 total_size += ist->data_size;
1458 total_packets += ist->nb_packets;
1460 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1461 i, j, media_type_string(type));
1462 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1463 ist->nb_packets, ist->data_size);
1465 if (ist->decoding_needed) {
1466 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1467 ist->frames_decoded);
1468 if (type == AVMEDIA_TYPE_AUDIO)
1469 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1470 av_log(NULL, AV_LOG_VERBOSE, "; ");
1473 av_log(NULL, AV_LOG_VERBOSE, "\n");
1476 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1477 total_packets, total_size);
     /* symmetric report for every output file */
1480 for (i = 0; i < nb_output_files; i++) {
1481 OutputFile *of = output_files[i];
1482 uint64_t total_packets = 0, total_size = 0;
1484 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1485 i, of->ctx->filename);
1487 for (j = 0; j < of->ctx->nb_streams; j++) {
1488 OutputStream *ost = output_streams[of->ost_index + j];
1489 enum AVMediaType type = ost->enc_ctx->codec_type;
1491 total_size += ost->data_size;
1492 total_packets += ost->packets_written;
1494 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1495 i, j, media_type_string(type));
1496 if (ost->encoding_needed) {
1497 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1498 ost->frames_encoded);
1499 if (type == AVMEDIA_TYPE_AUDIO)
1500 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1501 av_log(NULL, AV_LOG_VERBOSE, "; ");
1504 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1505 ost->packets_written, ost->data_size);
1507 av_log(NULL, AV_LOG_VERBOSE, "\n");
1510 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1511 total_packets, total_size);
     /* warn when absolutely nothing was encoded */
1513 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1514 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1516 av_log(NULL, AV_LOG_WARNING, "\n");
1518 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (at most every 500ms) or final progress line to stderr
 * and, when -progress is active, a machine-readable key=value report to
 * progress_avio: frame count, fps, quantizer, optional QP histogram and
 * PSNR, output size, time, bitrate, dup/drop counters. On the last report
 * it also triggers print_final_stats().
 */
1523 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1526 AVBPrint buf_script;
1528 AVFormatContext *oc;
1530 AVCodecContext *enc;
1531 int frame_number, vid, i;
1533 int64_t pts = INT64_MIN;
     /* static state: rate-limits reports and accumulates the QP histogram
      * across calls — this function is not reentrant */
1534 static int64_t last_time = -1;
1535 static int qp_histogram[52];
1536 int hours, mins, secs, us;
1538 if (!print_stats && !is_last_report && !progress_avio)
     /* throttle intermediate reports to one per 500ms of wall clock */
1541 if (!is_last_report) {
1542 if (last_time == -1) {
1543 last_time = cur_time;
1546 if ((cur_time - last_time) < 500000)
1548 last_time = cur_time;
1552 oc = output_files[0]->ctx;
1554 total_size = avio_size(oc->pb);
1555 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1556 total_size = avio_tell(oc->pb);
1560 av_bprint_init(&buf_script, 0, 1);
1561 for (i = 0; i < nb_output_streams; i++) {
1563 ost = output_streams[i];
1565 if (!ost->stream_copy)
1566 q = ost->quality / (float) FF_QP2LAMBDA;
     /* a video stream was already reported: only append its quantizer */
1568 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1570 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1571 ost->file_index, ost->index, q);
     /* first video stream: frame counter, fps, quantizer */
1573 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1574 float fps, t = (cur_time-timer_start) / 1000000.0;
1576 frame_number = ost->frame_number;
1577 fps = t > 1 ? frame_number / t : 0;
     /* 'fps < 9.95' picks 1 decimal for small rates, 0 otherwise */
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1579 frame_number, fps < 9.95, fps, q);
1580 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1581 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1582 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1583 ost->file_index, ost->index, q);
1585 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
     /* QP histogram: one hex digit per QP bucket, log2-compressed count */
1589 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1591 for (j = 0; j < 32; j++)
1592 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1595 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1597 double error, error_sum = 0;
1598 double scale, scale_sum = 0;
1600 char type[3] = { 'Y','U','V' };
1601 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1602 for (j = 0; j < 3; j++) {
     /* final report: cumulative encoder error over all frames;
      * periodic report: per-frame error stored on the stream */
1603 if (is_last_report) {
1604 error = enc->error[j];
1605 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1607 error = ost->error[j];
1608 scale = enc->width * enc->height * 255.0 * 255.0;
1614 p = psnr(error / scale);
1615 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1616 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1617 ost->file_index, ost->index, type[j] | 32, p);
1619 p = psnr(error_sum / scale_sum);
1620 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1621 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1622 ost->file_index, ost->index, p);
1626 /* compute min output value */
1627 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1628 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1629 ost->st->time_base, AV_TIME_BASE_Q));
     /* NOTE(review): 'last_droped' is a historical typo in the field name,
      * kept for consistency with its declaration elsewhere in the file */
1631 nb_frames_drop += ost->last_droped;
     /* split the microsecond pts into h:m:s.us components */
1634 secs = FFABS(pts) / AV_TIME_BASE;
1635 us = FFABS(pts) % AV_TIME_BASE;
1641 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1643 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1645 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1646 "size=%8.0fkB time=", total_size / 1024.0);
1648 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1649 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1651 (100 * us) / AV_TIME_BASE);
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1655 av_bprintf(&buf_script, "bitrate=N/A\n");
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1658 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1661 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1662 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1663 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1664 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1665 hours, mins, secs, us);
1667 if (nb_frames_dup || nb_frames_drop)
1668 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1669 nb_frames_dup, nb_frames_drop);
1670 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1671 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
     /* '\r' overwrites the line for intermediate reports, '\n' finalizes */
1673 if (print_stats || is_last_report) {
1674 const char end = is_last_report ? '\n' : '\r';
1675 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1676 fprintf(stderr, "%s %c", buf, end);
1678 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
     /* machine-readable -progress output */
1683 if (progress_avio) {
1684 av_bprintf(&buf_script, "progress=%s\n",
1685 is_last_report ? "end" : "continue");
1686 avio_write(progress_avio, buf_script.str,
1687 FFMIN(buf_script.len, buf_script.size - 1));
1688 avio_flush(progress_avio);
1689 av_bprint_finalize(&buf_script, NULL);
1690 if (is_last_report) {
1691 avio_closep(&progress_avio);
1696 print_final_stats(total_size);
/*
 * Flush every encoder at end of stream: repeatedly call the encode function
 * with a NULL frame until it stops producing packets, writing each delayed
 * packet to the muxer. Streams that do not need encoding, raw-sample audio
 * encoders and rawvideo-to-rawpicture outputs are skipped.
 */
1699 static void flush_encoders(void)
1703 for (i = 0; i < nb_output_streams; i++) {
1704 OutputStream *ost = output_streams[i];
1705 AVCodecContext *enc = ost->enc_ctx;
1706 AVFormatContext *os = output_files[ost->file_index]->ctx;
1707 int stop_encoding = 0;
1709 if (!ost->encoding_needed)
     /* raw (frame_size<=1) audio encoders buffer nothing to flush */
1712 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1714 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
     /* select the media-specific encode entry point */
1718 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1721 switch (enc->codec_type) {
1722 case AVMEDIA_TYPE_AUDIO:
1723 encode = avcodec_encode_audio2;
1726 case AVMEDIA_TYPE_VIDEO:
1727 encode = avcodec_encode_video2;
1738 av_init_packet(&pkt);
1742 update_benchmark(NULL);
     /* NULL frame == drain request: pulls delayed packets from the codec */
1743 ret = encode(enc, &pkt, NULL, &got_packet);
1744 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1746 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
     /* two-pass stats emitted by the encoder go to the pass logfile */
1749 if (ost->logfile && enc->stats_out) {
1750 fprintf(ost->logfile, "%s", enc->stats_out);
1756 if (ost->finished & MUXER_FINISHED) {
1757 av_free_packet(&pkt);
     /* convert packet timestamps from encoder to stream time base */
1760 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1761 pkt_size = pkt.size;
1762 write_frame(os, &pkt, ost);
1763 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1764 do_video_stats(ost, pkt_size);
1775 * Check whether a packet from ist should be written into ost at this time
/*
 * Decide whether a packet from input stream 'ist' should currently be
 * copied into output stream 'ost': the output must be mapped to this
 * exact input, and the output file's -ss start time must already have
 * been reached. Returns nonzero to write, 0 to skip (returns not visible
 * in this excerpt).
 */
1777 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1779 OutputFile *of = output_files[ost->file_index];
     /* global index of 'ist' among all input streams of all files */
1780 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1782 if (ost->source_index != ist_index)
     /* drop packets before the requested output start time */
1788 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one demuxed packet from 'ist' to 'ost' without re-encoding:
 * enforce keyframe/start-time/recording-time constraints, rescale the
 * timestamps into the output stream time base, optionally run the parser
 * to fix up the bitstream, and hand the packet to write_frame().
 */
1794 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1796 OutputFile *of = output_files[ost->file_index];
1797 InputFile *f = input_files [ist->file_index];
1798 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
     /* the -ss offset expressed in the output and input stream time bases */
1799 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1800 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1804 av_init_packet(&opkt);
     /* don't start a copied stream on a non-keyframe unless explicitly allowed */
1806 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1807 !ost->copy_initial_nonkeyframes)
     /* drop pre-start packets unless -copypriorss is set; the check uses
      * pkt->pts when available, ist->pts otherwise */
1810 if (pkt->pts == AV_NOPTS_VALUE) {
1811 if (!ost->frame_number && ist->pts < start_time &&
1812 !ost->copy_prior_start)
1815 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1816 !ost->copy_prior_start)
     /* output-file -t limit reached: finish this stream */
1820 if (of->recording_time != INT64_MAX &&
1821 ist->pts >= of->recording_time + start_time) {
1822 close_output_stream(ost);
     /* input-file -t limit, measured from the input's own start time */
1826 if (f->recording_time != INT64_MAX) {
1827 start_time = f->ctx->start_time;
1828 if (f->start_time != AV_NOPTS_VALUE)
1829 start_time += f->start_time;
1830 if (ist->pts >= f->recording_time + start_time) {
1831 close_output_stream(ost);
1836 /* force the input stream PTS */
1837 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
     /* rescale timestamps into the output time base, shifted by -ss */
1840 if (pkt->pts != AV_NOPTS_VALUE)
1841 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1843 opkt.pts = AV_NOPTS_VALUE;
1845 if (pkt->dts == AV_NOPTS_VALUE)
1846 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1848 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1849 opkt.dts -= ost_tb_start_time;
     /* audio: rescale with sample-accurate delta tracking to avoid drift */
1851 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1852 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1854 duration = ist->dec_ctx->frame_size;
1855 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1856 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1857 ost->st->time_base) - ost_tb_start_time;
1860 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1861 opkt.flags = pkt->flags;
1863 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
     /* for these codecs the parser must NOT rewrite the payload */
1864 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1865 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1866 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1867 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
     /* av_parser_change() may allocate a new payload; wrap it in a buf
      * so the packet owns (and later frees) it */
1869 if (av_parser_change(ost->parser, ost->st->codec,
1870 &opkt.data, &opkt.size,
1871 pkt->data, pkt->size,
1872 pkt->flags & AV_PKT_FLAG_KEY)) {
1873 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1878 opkt.data = pkt->data;
1879 opkt.size = pkt->size;
1881 av_copy_packet_side_data(&opkt, pkt);
     /* rawpicture muxers expect an AVPicture struct instead of raw bytes */
1883 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1884 /* store AVPicture in AVPacket, as expected by the output format */
1885 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1886 opkt.data = (uint8_t *)&pict;
1887 opkt.size = sizeof(AVPicture);
1888 opkt.flags |= AV_PKT_FLAG_KEY;
1891 write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (unless the count exceeds the user's guess_layout_max
 * cap) and log a warning about the guess. Return values are not visible
 * in this excerpt — presumably nonzero on success, 0 when no layout could
 * be determined; confirm against the full source.
 */
1894 int guess_input_channel_layout(InputStream *ist)
1896 AVCodecContext *dec = ist->dec_ctx;
1898 if (!dec->channel_layout) {
1899 char layout_name[256];
     /* user capped how many channels we may guess a layout for */
1901 if (dec->channels > ist->guess_layout_max)
1903 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1904 if (!dec->channel_layout)
1906 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1907 dec->channels, dec->channel_layout);
1908 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1909 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet for 'ist', derive a pts for the decoded frame
 * (decoder pts > pkt_pts > packet pts > stream dts, in that preference),
 * reconfigure the filtergraphs when sample format/rate/channels change,
 * and push the frame into every attached filter input.
 * Sets *got_output; returns the decoder/filter error code, 0 on success.
 */
1914 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1916 AVFrame *decoded_frame, *f;
1917 AVCodecContext *avctx = ist->dec_ctx;
1918 int i, ret, err = 0, resample_changed;
1919 AVRational decoded_frame_tb;
     /* lazily allocate the reusable decode/filter frames */
1921 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1922 return AVERROR(ENOMEM);
1923 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1924 return AVERROR(ENOMEM);
1925 decoded_frame = ist->decoded_frame;
1927 update_benchmark(NULL);
1928 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1929 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
     /* a decoder reporting an invalid sample rate is treated as bad data */
1931 if (ret >= 0 && avctx->sample_rate <= 0) {
1932 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1933 ret = AVERROR_INVALIDDATA;
     /* global success/failure decode counters */
1936 if (*got_output || ret<0)
1937 decode_error_stat[ret<0] ++;
1939 if (ret < 0 && exit_on_error)
1942 if (!*got_output || ret < 0)
1945 ist->samples_decoded += decoded_frame->nb_samples;
1946 ist->frames_decoded++;
1949 /* increment next_dts to use for the case where the input stream does not
1950 have timestamps or there are multiple frames in the packet */
1951 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1953 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
     /* detect audio parameter changes relative to the configured graph */
1957 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1958 ist->resample_channels != avctx->channels ||
1959 ist->resample_channel_layout != decoded_frame->channel_layout ||
1960 ist->resample_sample_rate != decoded_frame->sample_rate;
1961 if (resample_changed) {
1962 char layout1[64], layout2[64];
1964 if (!guess_input_channel_layout(ist)) {
1965 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1966 "layout for Input Stream #%d.%d\n", ist->file_index,
1970 decoded_frame->channel_layout = avctx->channel_layout;
1972 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1973 ist->resample_channel_layout);
1974 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1975 decoded_frame->channel_layout);
1977 av_log(NULL, AV_LOG_INFO,
1978 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1979 ist->file_index, ist->st->index,
1980 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1981 ist->resample_channels, layout1,
1982 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1983 avctx->channels, layout2);
     /* remember the new parameters, then rebuild affected filtergraphs */
1985 ist->resample_sample_fmt = decoded_frame->format;
1986 ist->resample_sample_rate = decoded_frame->sample_rate;
1987 ist->resample_channel_layout = decoded_frame->channel_layout;
1988 ist->resample_channels = avctx->channels;
1990 for (i = 0; i < nb_filtergraphs; i++)
1991 if (ist_in_filtergraph(filtergraphs[i], ist)) {
1992 FilterGraph *fg = filtergraphs[i];
1993 if (configure_filtergraph(fg) < 0) {
1994 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2000 /* if the decoder provides a pts, use it instead of the last packet pts.
2001 the decoder could be delaying output by a packet or more. */
2002 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2003 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2004 decoded_frame_tb = avctx->time_base;
2005 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2006 decoded_frame->pts = decoded_frame->pkt_pts;
2007 decoded_frame_tb = ist->st->time_base;
2008 } else if (pkt->pts != AV_NOPTS_VALUE) {
2009 decoded_frame->pts = pkt->pts;
2010 decoded_frame_tb = ist->st->time_base;
2012 decoded_frame->pts = ist->dts;
2013 decoded_frame_tb = AV_TIME_BASE_Q;
     /* consume the packet pts so a second frame from it doesn't reuse it */
2015 pkt->pts = AV_NOPTS_VALUE;
     /* rescale the frame pts to the 1/sample_rate base the filters expect */
2016 if (decoded_frame->pts != AV_NOPTS_VALUE)
2017 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2018 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2019 (AVRational){1, avctx->sample_rate});
     /* fan out to all filter inputs: ref a copy for all but the last,
      * which consumes the decoded frame itself */
2020 for (i = 0; i < ist->nb_filters; i++) {
2021 if (i < ist->nb_filters - 1) {
2022 f = ist->filter_frame;
2023 err = av_frame_ref(f, decoded_frame);
2028 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2029 AV_BUFFERSRC_FLAG_PUSH);
2030 if (err == AVERROR_EOF)
2031 err = 0; /* ignore */
2035 decoded_frame->pts = AV_NOPTS_VALUE;
2037 av_frame_unref(ist->filter_frame);
2038 av_frame_unref(decoded_frame);
2039 return err < 0 ? err : ret;
/*
 * Decode one video packet for 'ist', fix up decoder/demuxer mismatches
 * (has_b_frames, frame size), derive the frame timestamp from the best
 * effort timestamp, reconfigure filtergraphs on resolution/pixfmt change,
 * and push the frame into every attached filter input.
 * Sets *got_output; returns the decoder/filter error code, 0 on success.
 */
2042 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2044 AVFrame *decoded_frame, *f;
2045 int i, ret = 0, err = 0, resample_changed;
2046 int64_t best_effort_timestamp;
2047 AVRational *frame_sample_aspect;
2049 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2050 return AVERROR(ENOMEM);
2051 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2052 return AVERROR(ENOMEM);
2053 decoded_frame = ist->decoded_frame;
     /* feed the tracked dts to the decoder in the stream time base */
2054 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2056 update_benchmark(NULL);
2057 ret = avcodec_decode_video2(ist->dec_ctx,
2058 decoded_frame, got_output, pkt);
2059 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2061 // The following line may be required in some cases where there is no parser
2062 // or the parser does not has_b_frames correctly
2063 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2064 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2065 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2067 av_log(ist->dec_ctx, AV_LOG_WARNING,
2068 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2069 "If you want to help, upload a sample "
2070 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2071 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2072 ist->dec_ctx->has_b_frames,
2073 ist->st->codec->has_b_frames);
2076 if (*got_output || ret<0)
2077 decode_error_stat[ret<0] ++;
2079 if (ret < 0 && exit_on_error)
     /* debug aid: context vs frame parameter mismatch */
2082 if (*got_output && ret >= 0) {
2083 if (ist->dec_ctx->width != decoded_frame->width ||
2084 ist->dec_ctx->height != decoded_frame->height ||
2085 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2086 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2087 decoded_frame->width,
2088 decoded_frame->height,
2089 decoded_frame->format,
2090 ist->dec_ctx->width,
2091 ist->dec_ctx->height,
2092 ist->dec_ctx->pix_fmt);
2096 if (!*got_output || ret < 0)
     /* honor a user-forced field order (-top) */
2099 if(ist->top_field_first>=0)
2100 decoded_frame->top_field_first = ist->top_field_first;
2102 ist->frames_decoded++;
     /* download the frame from a hwaccel surface into system memory */
2104 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2105 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2109 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2111 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2112 if(best_effort_timestamp != AV_NOPTS_VALUE)
2113 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2116 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2117 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2118 ist->st->index, av_ts2str(decoded_frame->pts),
2119 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2120 best_effort_timestamp,
2121 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2122 decoded_frame->key_frame, decoded_frame->pict_type,
2123 ist->st->time_base.num, ist->st->time_base.den);
     /* container-level aspect ratio overrides the frame's */
2128 if (ist->st->sample_aspect_ratio.num)
2129 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
     /* detect a resolution/pixfmt change that invalidates the graphs */
2131 resample_changed = ist->resample_width != decoded_frame->width ||
2132 ist->resample_height != decoded_frame->height ||
2133 ist->resample_pix_fmt != decoded_frame->format;
2134 if (resample_changed) {
2135 av_log(NULL, AV_LOG_INFO,
2136 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2137 ist->file_index, ist->st->index,
2138 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2139 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2141 ist->resample_width = decoded_frame->width;
2142 ist->resample_height = decoded_frame->height;
2143 ist->resample_pix_fmt = decoded_frame->format;
2145 for (i = 0; i < nb_filtergraphs; i++) {
2146 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2147 configure_filtergraph(filtergraphs[i]) < 0) {
2148 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
     /* access the frame's SAR via the AVOption table of the frame class */
2154 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2155 for (i = 0; i < ist->nb_filters; i++) {
2156 if (!frame_sample_aspect->num)
2157 *frame_sample_aspect = ist->st->sample_aspect_ratio;
     /* ref a copy for all filter inputs but the last */
2159 if (i < ist->nb_filters - 1) {
2160 f = ist->filter_frame;
2161 err = av_frame_ref(f, decoded_frame);
2166 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2167 if (ret == AVERROR_EOF) {
2168 ret = 0; /* ignore */
2169 } else if (ret < 0) {
2170 av_log(NULL, AV_LOG_FATAL,
2171 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2177 av_frame_unref(ist->filter_frame);
2178 av_frame_unref(decoded_frame);
2179 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet for 'ist', optionally fix overlapping display
 * durations (-fix_sub_duration works on the previous subtitle, introducing
 * one event of latency), feed the result to sub2video and to every
 * subtitle-encoding output stream mapped to this input.
 * Sets *got_output; returns the decoder error code.
 */
2182 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2184 AVSubtitle subtitle;
2185 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2186 &subtitle, got_output, pkt);
2188 if (*got_output || ret<0)
2189 decode_error_stat[ret<0] ++;
2191 if (ret < 0 && exit_on_error)
2194 if (ret < 0 || !*got_output) {
     /* no more subtitles coming: flush the sub2video state */
2196 sub2video_flush(ist);
     /* clamp the previous subtitle's end time to this one's start so
      * consecutive events never overlap; the swap below delays output
      * by one subtitle */
2200 if (ist->fix_sub_duration) {
2202 if (ist->prev_sub.got_output) {
2203 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2204 1000, AV_TIME_BASE);
2205 if (end < ist->prev_sub.subtitle.end_display_time) {
2206 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2207 "Subtitle duration reduced from %d to %d%s\n",
2208 ist->prev_sub.subtitle.end_display_time, end,
2209 end <= 0 ? ", dropping it" : "");
2210 ist->prev_sub.subtitle.end_display_time = end;
2213 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2214 FFSWAP(int, ret, ist->prev_sub.ret);
2215 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2223 sub2video_update(ist, &subtitle);
2225 if (!subtitle.num_rects)
2228 ist->frames_decoded++;
     /* re-encode into every mapped subtitle output stream */
2230 for (i = 0; i < nb_output_streams; i++) {
2231 OutputStream *ost = output_streams[i];
2233 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2234 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2237 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2241 avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input attached to 'ist' by pushing a
 * NULL frame into each buffersrc. Error handling and the return value
 * are not visible in this excerpt — presumably returns the first failing
 * av_buffersrc_add_frame() code, 0 on success.
 */
2245 static int send_filter_eof(InputStream *ist)
2248 for (i = 0; i < ist->nb_filters; i++) {
2249 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2256 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet input handler. For decoded streams: loop the decoder
 * over the packet (it may contain several frames), advancing the tracked
 * dts/pts estimates, and send filter EOF after the final flush. For
 * stream-copied outputs: advance timestamps by frame/packet duration and
 * forward the packet via do_streamcopy(). pkt == NULL means EOF and
 * flushes the decoder.
 */
2257 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
     /* first packet: seed dts with the decoder delay (B-frame reorder) */
2263 if (!ist->saw_first_ts) {
2264 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2266 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2267 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2268 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2270 ist->saw_first_ts = 1;
2273 if (ist->next_dts == AV_NOPTS_VALUE)
2274 ist->next_dts = ist->dts;
2275 if (ist->next_pts == AV_NOPTS_VALUE)
2276 ist->next_pts = ist->pts;
2280 av_init_packet(&avpkt);
     /* packet dts, when present, resynchronizes the tracked timeline */
2288 if (pkt->dts != AV_NOPTS_VALUE) {
2289 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2290 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2291 ist->next_pts = ist->pts = ist->dts;
2294 // while we have more to decode or while the decoder did output something on EOF
2295 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2299 ist->pts = ist->next_pts;
2300 ist->dts = ist->next_dts;
     /* a packet only partially consumed means multiple frames per packet;
      * warn unless the codec declares CAP_SUBFRAMES */
2302 if (avpkt.size && avpkt.size != pkt->size &&
2303 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2304 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2305 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2306 ist->showed_multi_packet_warning = 1;
2309 switch (ist->dec_ctx->codec_type) {
2310 case AVMEDIA_TYPE_AUDIO:
2311 ret = decode_audio (ist, &avpkt, &got_output);
2313 case AVMEDIA_TYPE_VIDEO:
2314 ret = decode_video (ist, &avpkt, &got_output);
     /* frame duration: packet duration if known, else derived from the
      * decoder's framerate and repeat_pict/ticks_per_frame */
2315 if (avpkt.duration) {
2316 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2317 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2318 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2319 duration = ((int64_t)AV_TIME_BASE *
2320 ist->dec_ctx->framerate.den * ticks) /
2321 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2325 if(ist->dts != AV_NOPTS_VALUE && duration) {
2326 ist->next_dts += duration;
2328 ist->next_dts = AV_NOPTS_VALUE;
2331 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2333 case AVMEDIA_TYPE_SUBTITLE:
2334 ret = transcode_subtitles(ist, &avpkt, &got_output);
2341 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2342 ist->file_index, ist->st->index, av_err2str(ret));
2349 avpkt.pts= AV_NOPTS_VALUE;
2351 // touch data and size only if not EOF
2353 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2361 if (got_output && !pkt)
2365 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2366 if (!pkt && ist->decoding_needed && !got_output) {
2367 int ret = send_filter_eof(ist);
2369 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2374 /* handle stream copy */
2375 if (!ist->decoding_needed) {
2376 ist->dts = ist->next_dts;
2377 switch (ist->dec_ctx->codec_type) {
2378 case AVMEDIA_TYPE_AUDIO:
2379 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2380 ist->dec_ctx->sample_rate;
2382 case AVMEDIA_TYPE_VIDEO:
2383 if (ist->framerate.num) {
2384 // TODO: Remove work-around for c99-to-c89 issue 7
2385 AVRational time_base_q = AV_TIME_BASE_Q;
     /* snap to the forced framerate grid: round-trip through frame index */
2386 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2387 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2388 } else if (pkt->duration) {
2389 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2390 } else if(ist->dec_ctx->framerate.num != 0) {
2391 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2392 ist->next_dts += ((int64_t)AV_TIME_BASE *
2393 ist->dec_ctx->framerate.den * ticks) /
2394 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2398 ist->pts = ist->dts;
2399 ist->next_pts = ist->next_dts;
     /* forward the packet to every stream-copy output mapped to 'ist' */
2401 for (i = 0; pkt && i < nb_output_streams; i++) {
2402 OutputStream *ost = output_streams[i];
2404 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2407 do_streamcopy(ist, ost, pkt);
/* Emit an SDP (Session Description Protocol) blob covering all RTP outputs.
 * Gathers the AVFormatContext of every output file muxed as "rtp" into a
 * temporary array, generates the session description with av_sdp_create(),
 * then either prints it to stdout or, when -sdp_file was given, writes it
 * to that file via avio_open2()/avio_printf().
 * NOTE(review): this excerpt elides several lines (allocation-failure check
 * for `avc`, the `sdp` buffer declaration, cleanup); comments describe only
 * the visible logic. */
2413 static void print_sdp(void)
2418 AVIOContext *sdp_pb;
/* One context slot per output file; only the "rtp" ones are filled in. */
2419 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2423 for (i = 0, j = 0; i < nb_output_files; i++) {
2424 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2425 avc[j] = output_files[i]->ctx;
/* j counts how many RTP outputs were collected; only those are described. */
2430 av_sdp_create(avc, j, sdp, sizeof(sdp));
2432 if (!sdp_filename) {
/* No -sdp_file: dump the SDP to stdout for the user to capture. */
2433 printf("SDP:\n%s\n", sdp);
2436 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2437 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2439 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2440 avio_closep(&sdp_pb);
/* Free the filename so the SDP is written at most once per run. */
2441 av_freep(&sdp_filename);
/* Return the compiled-in HWAccel table entry whose pix_fmt matches the
 * requested pixel format. The hwaccels[] table is terminated by an entry
 * with a NULL name. NOTE(review): the fall-through `return NULL;` for the
 * no-match case is elided from this excerpt. */
2448 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2451 for (i = 0; hwaccels[i].name; i++)
2452 if (hwaccels[i].pix_fmt == pix_fmt)
2453 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format that matches the user's
 * -hwaccel selection and can actually be initialized. Software formats
 * (no AV_PIX_FMT_FLAG_HWACCEL) fall out of the hwaccel path.
 * NOTE(review): the terminating returns (selected *p / software fallback)
 * are elided from this excerpt. */
2457 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* InputStream was stashed in dec_ctx->opaque by init_input_stream(). */
2459 InputStream *ist = s->opaque;
2460 const enum AVPixelFormat *p;
2463 for (p = pix_fmts; *p != -1; p++) {
2464 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2465 const HWAccel *hwaccel;
/* Skip plain software formats; only hwaccel formats are candidates here. */
2467 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2470 hwaccel = get_hwaccel(*p);
/* Reject if another hwaccel is already active, or if it does not match
 * the explicitly requested one (HWACCEL_AUTO accepts any). */
2472 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2473 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2476 ret = hwaccel->init(s);
/* init failure is fatal only when the user asked for this hwaccel by name;
 * with -hwaccel auto we silently try the next candidate. */
2478 if (ist->hwaccel_id == hwaccel->id) {
2479 av_log(NULL, AV_LOG_FATAL,
2480 "%s hwaccel requested for input stream #%d:%d, "
2481 "but cannot be initialized.\n", hwaccel->name,
2482 ist->file_index, ist->st->index);
2483 return AV_PIX_FMT_NONE;
/* Success: remember which hwaccel/pix_fmt is active for this stream. */
2487 ist->active_hwaccel_id = hwaccel->id;
2488 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: route frame allocation to the active
 * hwaccel's allocator when the frame is in the hwaccel pixel format,
 * otherwise defer to libavcodec's default buffer allocation. */
2495 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2497 InputStream *ist = s->opaque;
2499 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2500 return ist->hwaccel_get_buffer(s, frame, flags);
2502 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream ist_index when decoding is needed, and
 * initialize its timestamp bookkeeping. On failure, writes a human-readable
 * message into `error` (error_len bytes) and returns a negative AVERROR.
 * NOTE(review): several lines (the `if (!codec)` guard before the snprintf,
 * closing braces, final `return 0;`) are elided from this excerpt. */
2505 static int init_input_stream(int ist_index, char *error, int error_len)
2508 InputStream *ist = input_streams[ist_index];
2510 if (ist->decoding_needed) {
2511 AVCodec *codec = ist->dec;
/* Decoder missing entirely — report which codec was expected. */
2513 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2514 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2515 return AVERROR(EINVAL);
/* Hook up the hwaccel-aware callbacks; opaque lets them find the stream. */
2518 ist->dec_ctx->opaque = ist;
2519 ist->dec_ctx->get_format = get_format;
2520 ist->dec_ctx->get_buffer2 = get_buffer;
2521 ist->dec_ctx->thread_safe_callbacks = 1;
2523 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles written to an output need end-display-time computation;
 * mixing that with filtering is only partially supported. */
2524 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2525 (ist->decoding_needed & DECODING_FOR_OST)) {
2526 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2527 if (ist->decoding_needed & DECODING_FOR_FILTER)
2528 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user set one. */
2531 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2532 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2533 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2534 if (ret == AVERROR_EXPERIMENTAL)
2535 abort_codec_experimental(codec, 0);
2537 snprintf(error, error_len,
2538 "Error while opening decoder for input stream "
2540 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed by avcodec_open2 are an error. */
2543 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2546 ist->next_pts = AV_NOPTS_VALUE;
2547 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream feeding this output stream, or fall through
 * (NULL return elided in this excerpt) when the output has no direct
 * source, e.g. it is fed by a complex filtergraph. */
2552 static InputStream *get_input_stream(OutputStream *ost)
2554 if (ost->source_index >= 0)
2555 return input_streams[ost->source_index];
/* qsort comparator for int64_t values: ascending order, overflow-safe
 * (explicit three-way compare instead of subtraction). */
2559 static int compare_int64(const void *a, const void *b)
2561 int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2562 return va < vb ? -1 : va > vb ? +1 : 0;
/* Open the encoder for an output stream (when encoding is needed), copy the
 * resulting codec parameters into the muxer's AVStream, and apply any
 * leftover encoder options to the context. On failure, writes a message
 * into `error` and returns a negative AVERROR.
 * NOTE(review): this excerpt elides the `if (!codec)` check, several
 * `goto`/return paths and closing braces; comments cover visible logic. */
2565 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2569 if (ost->encoding_needed) {
2570 AVCodec *codec = ost->enc;
2571 AVCodecContext *dec = NULL;
2574 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's ASS subtitle header to the encoder; the ASS
 * code assumes NUL termination, hence the extra byte. */
2576 if (dec && dec->subtitle_header) {
2577 /* ASS code assumes this buffer is null terminated so add extra byte. */
2578 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2579 if (!ost->enc_ctx->subtitle_header)
2580 return AVERROR(ENOMEM);
2581 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2582 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2584 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2585 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2586 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2588 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2589 if (ret == AVERROR_EXPERIMENTAL)
2590 abort_codec_experimental(codec, 1);
2591 snprintf(error, error_len,
2592 "Error while opening encoder for output stream #%d:%d - "
2593 "maybe incorrect parameters such as bit_rate, rate, width or height",
2594 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to deliver exactly
 * frame_size samples per frame. */
2597 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2598 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2599 av_buffersink_set_frame_size(ost->filter->filter,
2600 ost->enc_ctx->frame_size);
2601 assert_avoptions(ost->encoder_opts);
2602 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2603 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2604 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the (now fully initialized) encoder context into st->codec so
 * the muxer sees the negotiated parameters. */
2606 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2608 av_log(NULL, AV_LOG_FATAL,
2609 "Error initializing the output stream codec context.\n");
2613 // copy timebase while removing common factors
2614 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2615 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path (visible else-branch): only apply user options. */
2617 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2619 av_log(NULL, AV_LOG_FATAL,
2620 "Error setting up codec context options.\n");
2623 // copy timebase while removing common factors
2624 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse the -force_key_frames specification `kf` (comma-separated times, or
 * "chapters[+offset]") into a sorted array of keyframe PTS values in
 * avctx->time_base units, stored in ost->forced_kf_pts / forced_kf_count.
 * NOTE(review): this excerpt elides the comma-counting body, the `p = kf`
 * reset, `continue`s and closing braces; comments cover visible logic. */
2630 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2631 AVCodecContext *avctx)
2634 int n = 1, i, size, index = 0;
/* First pass over kf: count entries (comma counting elided here). */
2637 for (p = kf; *p; p++)
2641 pts = av_malloc_array(size, sizeof(*pts));
2643 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated entry. */
2648 for (i = 0; i < n; i++) {
2649 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
2654 if (!memcmp(p, "chapters", 8)) {
2656 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by nb_chapters-1 extra slots (the entry itself already
 * accounted for one); guard the size arithmetic against overflow. */
2659 if (avf->nb_chapters > INT_MAX - size ||
2660 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2662 av_log(NULL, AV_LOG_FATAL,
2663 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", in AV_TIME_BASE units. */
2666 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2667 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2669 for (j = 0; j < avf->nb_chapters; j++) {
2670 AVChapter *c = avf->chapters[j];
2671 av_assert1(index < size);
2672 pts[index++] = av_rescale_q(c->start, c->time_base,
2673 avctx->time_base) + t;
/* Plain timestamp entry. */
2678 t = parse_time_or_die("force_key_frames", p, 1);
2679 av_assert1(index < size);
2680 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* All slots must have been filled; sort so lookup can scan in order. */
2687 av_assert0(index == size);
2688 qsort(pts, size, sizeof(*pts), compare_int64);
2689 ost->forced_kf_count = size;
2690 ost->forced_kf_pts = pts;
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe of the input file; such streams are not
 * mapped to any output. nb_streams_warn tracks the highest index already
 * reported so each new stream is warned about only once. */
2693 static void report_new_stream(int input_index, AVPacket *pkt)
2695 InputFile *file = input_files[input_index];
2696 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a later) stream index — nothing to do. */
2698 if (pkt->stream_index < file->nb_streams_warn)
2700 av_log(file->ctx, AV_LOG_WARNING,
2701 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2702 av_get_media_type_string(st->codec->codec_type),
2703 input_index, pkt->stream_index,
2704 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2705 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on the output stream to
 * "Lavc<version> <codec-name>" (or just "Lavc <codec-name>" in bitexact
 * mode), unless the user already provided one. Bitexact mode is detected
 * from both the muxer's fflags and the encoder's flags so regression tests
 * stay version-independent. */
2708 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2710 AVDictionaryEntry *e;
2712 uint8_t *encoder_string;
2713 int encoder_string_len;
2714 int format_flags = 0;
2715 int codec_flags = 0;
/* User-supplied encoder tag wins — leave it untouched. */
2717 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the muxer "fflags" option string to detect AVFMT_FLAG_BITEXACT. */
2720 e = av_dict_get(of->opts, "fflags", NULL, 0);
2722 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2725 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Same for the encoder "flags" option (AV_CODEC_FLAG_BITEXACT). */
2727 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2729 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2732 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* Room for "LIBAVCODEC_IDENT " + codec name + separator + NUL. */
2735 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2736 encoder_string = av_mallocz(encoder_string_len);
2737 if (!encoder_string)
2740 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2741 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2743 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2744 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
2745 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2746 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the transcode loop: bind complex-filtergraph outputs
 * to a source stream, compute encoding/stream-copy parameters for every
 * output stream, open all encoders and decoders, write output headers, and
 * dump the stream mapping. Returns 0 on success or a negative AVERROR.
 * NOTE(review): many lines are elided in this excerpt (breaks, continues,
 * gotos, closing braces, several declarations); comments below describe
 * only the visible logic and hedge where control flow is not visible. */
2749 static int transcode_init(void)
2751 int ret = 0, i, j, k;
2752 AVFormatContext *oc;
2755 char error[1024] = {0};
/* Give filtergraph-fed outputs a nominal source_index (the graph's single
 * input stream) so later per-stream code has an input to refer to. */
2758 for (i = 0; i < nb_filtergraphs; i++) {
2759 FilterGraph *fg = filtergraphs[i];
2760 for (j = 0; j < fg->nb_outputs; j++) {
2761 OutputFilter *ofilter = fg->outputs[j];
2762 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2764 if (fg->nb_inputs != 1)
2766 for (k = nb_input_streams-1; k >= 0 ; k--)
2767 if (fg->inputs[0]->ist == input_streams[k])
2769 ofilter->ost->source_index = k;
2773 /* init framerate emulation */
2774 for (i = 0; i < nb_input_files; i++) {
2775 InputFile *ifile = input_files[i];
2776 if (ifile->rate_emu)
2777 for (j = 0; j < ifile->nb_streams; j++)
2778 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2781 /* for each output stream, we compute the right encoding parameters */
2782 for (i = 0; i < nb_output_streams; i++) {
2783 AVCodecContext *enc_ctx;
2784 AVCodecContext *dec_ctx = NULL;
2785 ost = output_streams[i];
2786 oc = output_files[ost->file_index]->ctx;
2787 ist = get_input_stream(ost);
2789 if (ost->attachment_filename)
/* Stream copy writes straight into st->codec; encoding uses enc_ctx. */
2792 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2795 dec_ctx = ist->dec_ctx;
2797 ost->st->disposition = ist->st->disposition;
2798 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2799 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output, mark it
 * as the default one. */
2801 for (j=0; j<oc->nb_streams; j++) {
2802 AVStream *st = oc->streams[j];
2803 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2806 if (j == oc->nb_streams)
2807 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2808 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: transfer codec parameters verbatim ---- */
2811 if (ost->stream_copy) {
2813 uint64_t extra_size;
2815 av_assert0(ist && !ost->filter);
2817 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2819 if (extra_size > INT_MAX) {
2820 return AVERROR(EINVAL);
2823 /* if stream_copy is selected, no need to decode or encode */
2824 enc_ctx->codec_id = dec_ctx->codec_id;
2825 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec_tag only if the output container would map it
 * back to the same codec id (or has no tag table at all). */
2827 if (!enc_ctx->codec_tag) {
2828 unsigned int codec_tag;
2829 if (!oc->oformat->codec_tag ||
2830 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2831 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2832 enc_ctx->codec_tag = dec_ctx->codec_tag;
2835 enc_ctx->bit_rate = dec_ctx->bit_rate;
2836 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2837 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2838 enc_ctx->field_order = dec_ctx->field_order;
/* Copy extradata into a fresh, padded buffer (decoder owns its own). */
2839 if (dec_ctx->extradata_size) {
2840 enc_ctx->extradata = av_mallocz(extra_size);
2841 if (!enc_ctx->extradata) {
2842 return AVERROR(ENOMEM);
2844 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2846 enc_ctx->extradata_size= dec_ctx->extradata_size;
2847 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2849 enc_ctx->time_base = ist->st->time_base;
2851 * Avi is a special case here because it supports variable fps but
2852 * having the fps and timebase differe significantly adds quite some
/* Container-specific timebase fixups (copy_tb < 0 means "auto"). */
2855 if(!strcmp(oc->oformat->name, "avi")) {
2856 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2857 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2858 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2859 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2861 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2862 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2863 enc_ctx->ticks_per_frame = 2;
2864 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2865 && av_q2d(ist->st->time_base) < 1.0/500
2867 enc_ctx->time_base = dec_ctx->time_base;
2868 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2869 enc_ctx->time_base.den *= 2;
2870 enc_ctx->ticks_per_frame = 2;
/* Non-variable-fps containers other than the mov/mp4 family may prefer
 * the (tick-scaled) codec timebase over the stream timebase. */
2872 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2873 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2874 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2875 && strcmp(oc->oformat->name, "f4v")
2877 if( copy_tb<0 && dec_ctx->time_base.den
2878 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2879 && av_q2d(ist->st->time_base) < 1.0/500
2881 enc_ctx->time_base = dec_ctx->time_base;
2882 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the codec timebase when it is sane
 * (roughly 1..120 fps). */
2885 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2886 && dec_ctx->time_base.num < dec_ctx->time_base.den
2887 && dec_ctx->time_base.num > 0
2888 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2889 enc_ctx->time_base = dec_ctx->time_base;
2892 if (ist && !ost->frame_rate.num)
2893 ost->frame_rate = ist->framerate;
2894 if(ost->frame_rate.num)
2895 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2897 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2898 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy stream-level side data, except a display matrix that the
 * user overrode with -metadata rotate / autorotate handling. */
2900 if (ist->st->nb_side_data) {
2901 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2902 sizeof(*ist->st->side_data));
2903 if (!ost->st->side_data)
2904 return AVERROR(ENOMEM);
2906 ost->st->nb_side_data = 0;
2907 for (j = 0; j < ist->st->nb_side_data; j++) {
2908 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2909 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2911 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2914 sd_dst->data = av_malloc(sd_src->size);
2916 return AVERROR(ENOMEM);
2917 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2918 sd_dst->size = sd_src->size;
2919 sd_dst->type = sd_src->type;
2920 ost->st->nb_side_data++;
2924 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Media-type-specific parameter copy for stream copy. */
2926 switch (enc_ctx->codec_type) {
2927 case AVMEDIA_TYPE_AUDIO:
2928 if (audio_volume != 256) {
2929 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2932 enc_ctx->channel_layout = dec_ctx->channel_layout;
2933 enc_ctx->sample_rate = dec_ctx->sample_rate;
2934 enc_ctx->channels = dec_ctx->channels;
2935 enc_ctx->frame_size = dec_ctx->frame_size;
2936 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2937 enc_ctx->block_align = dec_ctx->block_align;
2938 enc_ctx->initial_padding = dec_ctx->delay;
2939 #if FF_API_AUDIOENC_DELAY
2940 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3 block_align values from some demuxers are bogus — drop them. */
2942 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2943 enc_ctx->block_align= 0;
2944 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2945 enc_ctx->block_align= 0;
2947 case AVMEDIA_TYPE_VIDEO:
2948 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2949 enc_ctx->width = dec_ctx->width;
2950 enc_ctx->height = dec_ctx->height;
2951 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2952 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2954 av_mul_q(ost->frame_aspect_ratio,
2955 (AVRational){ enc_ctx->height, enc_ctx->width });
2956 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2957 "with stream copy may produce invalid files\n");
2959 else if (ist->st->sample_aspect_ratio.num)
2960 sar = ist->st->sample_aspect_ratio;
2962 sar = dec_ctx->sample_aspect_ratio;
2963 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2964 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2965 ost->st->r_frame_rate = ist->st->r_frame_rate;
2967 case AVMEDIA_TYPE_SUBTITLE:
2968 enc_ctx->width = dec_ctx->width;
2969 enc_ctx->height = dec_ctx->height;
2971 case AVMEDIA_TYPE_UNKNOWN:
2972 case AVMEDIA_TYPE_DATA:
2973 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path: pick encoder, build filtergraph, set params ---- */
2980 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2982 /* should only happen when a default codec is not present. */
2983 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2984 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2985 ret = AVERROR(EINVAL);
2989 set_encoder_id(output_files[ost->file_index], ost);
/* Audio/video encoders are always fed through a (possibly trivial)
 * filtergraph; build one if the stream does not have one yet. */
2992 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2993 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2995 fg = init_simple_filtergraph(ist, ost);
2996 if (configure_filtergraph(fg)) {
2997 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Frame-rate selection: CLI -r, then filter sink, then input hints,
 * finally a 25 fps fallback with a warning. */
3002 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3003 if (!ost->frame_rate.num)
3004 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3005 if (ist && !ost->frame_rate.num)
3006 ost->frame_rate = ist->framerate;
3007 if (ist && !ost->frame_rate.num)
3008 ost->frame_rate = ist->st->r_frame_rate;
3009 if (ist && !ost->frame_rate.num) {
3010 ost->frame_rate = (AVRational){25, 1};
3011 av_log(NULL, AV_LOG_WARNING,
3013 "about the input framerate is available. Falling "
3014 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3015 "if you want a different framerate.\n",
3016 ost->file_index, ost->index);
3018 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3019 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3020 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3021 ost->frame_rate = ost->enc->supported_framerates[idx];
3023 // reduce frame rate for mpeg4 to be within the spec limits
3024 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3025 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3026 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull negotiated formats from the filtergraph into the encoder ctx. */
3030 switch (enc_ctx->codec_type) {
3031 case AVMEDIA_TYPE_AUDIO:
3032 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3033 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3034 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3035 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3036 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3038 case AVMEDIA_TYPE_VIDEO:
3039 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3040 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3041 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3042 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3043 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3044 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3045 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale previously parsed forced keyframe times into the final
 * encoder timebase. */
3047 for (j = 0; j < ost->forced_kf_count; j++)
3048 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3050 enc_ctx->time_base);
3052 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3053 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3054 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3055 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3056 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3057 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn about auto-chosen non-yuv420p pixel formats for codecs where
 * old players expect yuv420p. */
3058 if (!strncmp(ost->enc->name, "libx264", 7) &&
3059 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3060 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3061 av_log(NULL, AV_LOG_WARNING,
3062 "No pixel format specified, %s for H.264 encoding chosen.\n"
3063 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3064 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3065 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3066 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3067 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3068 av_log(NULL, AV_LOG_WARNING,
3069 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3070 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3071 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3072 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3074 ost->st->avg_frame_rate = ost->frame_rate;
/* When the geometry/pix_fmt changed relative to the input, the input's
 * bits_per_raw_sample no longer applies; use the CLI override. */
3077 enc_ctx->width != dec_ctx->width ||
3078 enc_ctx->height != dec_ctx->height ||
3079 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3080 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3083 if (ost->forced_keyframes) {
/* "expr:" prefix: compile a keyframe-decision expression. */
3084 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3085 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3086 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3088 av_log(NULL, AV_LOG_ERROR,
3089 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3092 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3093 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3094 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3095 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3097 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3098 // parse it only for static kf timings
3099 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3100 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3104 case AVMEDIA_TYPE_SUBTITLE:
/* Subtitles use a fixed millisecond timebase; inherit canvas size
 * from the source when unset. */
3105 enc_ctx->time_base = (AVRational){1, 1000};
3106 if (!enc_ctx->width) {
3107 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3108 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3111 case AVMEDIA_TYPE_DATA:
/* Apply the user's -disposition string via a local AVOption flags table. */
3119 if (ost->disposition) {
3120 static const AVOption opts[] = {
3121 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3122 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3123 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3124 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3125 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3126 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3127 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3128 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3129 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3130 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3131 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3132 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3133 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3134 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3137 static const AVClass class = {
3139 .item_name = av_default_item_name,
3141 .version = LIBAVUTIL_VERSION_INT,
3143 const AVClass *pclass = &class;
3145 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3151 /* open each encoder */
3152 for (i = 0; i < nb_output_streams; i++) {
3153 ret = init_output_stream(output_streams[i], error, sizeof(error));
3158 /* init input streams */
3159 for (i = 0; i < nb_input_streams; i++)
3160 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close the encoders opened above before bailing out. */
3161 for (i = 0; i < nb_output_streams; i++) {
3162 ost = output_streams[i];
3163 avcodec_close(ost->enc_ctx);
3168 /* discard unused programs */
3169 for (i = 0; i < nb_input_files; i++) {
3170 InputFile *ifile = input_files[i];
3171 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3172 AVProgram *p = ifile->ctx->programs[j];
3173 int discard = AVDISCARD_ALL;
/* Keep a program if any of its member streams is in use. */
3175 for (k = 0; k < p->nb_stream_indexes; k++)
3176 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3177 discard = AVDISCARD_DEFAULT;
3180 p->discard = discard;
3184 /* open files and write file headers */
3185 for (i = 0; i < nb_output_files; i++) {
3186 oc = output_files[i]->ctx;
3187 oc->interrupt_callback = int_cb;
3188 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3189 snprintf(error, sizeof(error),
3190 "Could not write header for output file #%d "
3191 "(incorrect codec parameters ?): %s",
3192 i, av_err2str(ret));
3193 ret = AVERROR(EINVAL);
3196 // assert_avoptions(output_files[i]->opts);
/* Track whether an SDP should be emitted (non-rtp outputs elide it;
 * branch body elided in this excerpt). */
3197 if (strcmp(oc->oformat->name, "rtp")) {
3203 /* dump the file output parameters - cannot be done before in case
3205 for (i = 0; i < nb_output_files; i++) {
3206 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3209 /* dump the stream mapping */
3210 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3211 for (i = 0; i < nb_input_streams; i++) {
3212 ist = input_streams[i];
3214 for (j = 0; j < ist->nb_filters; j++) {
3215 if (ist->filters[j]->graph->graph_desc) {
3216 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3217 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3218 ist->filters[j]->name);
3219 if (nb_filtergraphs > 1)
3220 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3221 av_log(NULL, AV_LOG_INFO, "\n");
3226 for (i = 0; i < nb_output_streams; i++) {
3227 ost = output_streams[i];
3229 if (ost->attachment_filename) {
3230 /* an attached file */
3231 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3232 ost->attachment_filename, ost->file_index, ost->index);
3236 if (ost->filter && ost->filter->graph->graph_desc) {
3237 /* output from a complex graph */
3238 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3239 if (nb_filtergraphs > 1)
3240 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3242 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3243 ost->index, ost->enc ? ost->enc->name : "?");
3247 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3248 input_streams[ost->source_index]->file_index,
3249 input_streams[ost->source_index]->st->index,
3252 if (ost->sync_ist != input_streams[ost->source_index])
3253 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3254 ost->sync_ist->file_index,
3255 ost->sync_ist->st->index);
3256 if (ost->stream_copy)
3257 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Encoding: print "(codec (decoder) -> codec (encoder))", collapsing
 * decoder/encoder names identical to the codec name to "native". */
3259 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3260 const AVCodec *out_codec = ost->enc;
3261 const char *decoder_name = "?";
3262 const char *in_codec_name = "?";
3263 const char *encoder_name = "?";
3264 const char *out_codec_name = "?";
3265 const AVCodecDescriptor *desc;
3268 decoder_name = in_codec->name;
3269 desc = avcodec_descriptor_get(in_codec->id);
3271 in_codec_name = desc->name;
3272 if (!strcmp(decoder_name, in_codec_name))
3273 decoder_name = "native";
3277 encoder_name = out_codec->name;
3278 desc = avcodec_descriptor_get(out_codec->id);
3280 out_codec_name = desc->name;
3281 if (!strcmp(encoder_name, out_codec_name))
3282 encoder_name = "native";
3285 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3286 in_codec_name, decoder_name,
3287 out_codec_name, encoder_name);
3289 av_log(NULL, AV_LOG_INFO, "\n");
/* Common error exit: report the accumulated message. */
3293 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3297 if (sdp_filename || want_sdp) {
3301 transcode_init_done = 1;
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3307 static int need_output(void)
3311 for (i = 0; i < nb_output_streams; i++) {
3312 OutputStream *ost = output_streams[i];
3313 OutputFile *of = output_files[ost->file_index];
3314 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs (limit_filesize). */
3316 if (ost->finished ||
3317 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close every stream of this output file. */
3319 if (ost->frame_number >= ost->max_frames) {
3321 for (j = 0; j < of->ctx->nb_streams; j++)
3322 close_output_stream(output_streams[of->ost_index + j]);
 * Select the output stream to process.
 * Picks the unfinished output stream with the smallest current DTS
 * (rescaled to a common timebase) so muxing stays roughly interleaved.
 * @return selected output stream, or NULL if none available
3337 static OutputStream *choose_output(void)
3340 int64_t opts_min = INT64_MAX;
3341 OutputStream *ost_min = NULL;
3343 for (i = 0; i < nb_output_streams; i++) {
3344 OutputStream *ost = output_streams[i];
3345 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3347 if (!ost->finished && opts < opts_min) {
/* An "unavailable" stream still lowers opts_min but yields NULL, which
 * makes the caller wait rather than process another stream. */
3349 ost_min = ost->unavailable ? NULL : ost;
/* Poll (at most every 100ms) for interactive keyboard commands on stderr's
 * terminal: verbosity (+/-), QP histogram (s), packet dumping (h), debug
 * modes (d/D), filtergraph commands (c/C), quit (q), and help (?).
 * Returns AVERROR_EXIT when the user asked to stop, 0 otherwise.
 * NOTE(review): several lines (the `int key;` declaration, the 'q' check,
 * closing braces) are elided from this excerpt. */
3355 static int check_keyboard_interaction(int64_t cur_time)
3358 static int64_t last_time;
3359 if (received_nb_signals)
3360 return AVERROR_EXIT;
3361 /* read_key() returns 0 on EOF */
/* Rate-limit polling to every 100ms, and never read keys as a daemon. */
3362 if(cur_time - last_time >= 100000 && !run_as_daemon){
3364 last_time = cur_time;
3368 return AVERROR_EXIT;
3369 if (key == '+') av_log_set_level(av_log_get_level()+10);
3370 if (key == '-') av_log_set_level(av_log_get_level()-10);
3371 if (key == 's') qp_hist ^= 1;
/* 'h' cycles: off -> pkt dump -> pkt+payload hex dump -> off. */
3374 do_hex_dump = do_pkt_dump = 0;
3375 } else if(do_pkt_dump){
3379 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line of the form
 * "<target>|all <time>|-1 <command>[ <argument>]" and send or queue it. */
3381 if (key == 'c' || key == 'C'){
3382 char buf[4096], target[64], command[256], arg[256] = {0};
3385 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3387 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3392 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3393 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3394 target, time, command, arg);
3395 for (i = 0; i < nb_filtergraphs; i++) {
3396 FilterGraph *fg = filtergraphs[i];
/* Lowercase 'c' sends immediately (to the first matching filter);
 * uppercase 'C' queues for execution at the given time. */
3399 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3400 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3401 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3402 } else if (key == 'c') {
3403 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3404 ret = AVERROR_PATCHWELCOME;
3406 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3408 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3413 av_log(NULL, AV_LOG_ERROR,
3414 "Parse error, at least 3 arguments were expected, "
3415 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles debug modes automatically; 'd' prompts for a numeric value. */
3418 if (key == 'd' || key == 'D'){
3421 debug = input_streams[0]->st->codec->debug<<1;
3422 if(!debug) debug = 1;
3423 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3429 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3433 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3434 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3436 for(i=0;i<nb_input_streams;i++) {
3437 input_streams[i]->st->codec->debug = debug;
3439 for(i=0;i<nb_output_streams;i++) {
3440 OutputStream *ost = output_streams[i];
3441 ost->enc_ctx->debug = debug;
3443 if(debug) av_log_set_level(AV_LOG_DEBUG);
3444 fprintf(stderr,"debug=%d\n", debug);
3447 fprintf(stderr, "key function\n"
3448 "? show this help\n"
3449 "+ increase verbosity\n"
3450 "- decrease verbosity\n"
3451 "c Send command to first matching filter supporting it\n"
3452 "C Send/Que command to all matching filters\n"
3453 "D cycle through available debug modes\n"
3454 "h dump packets/hex press to cycle through the 3 states\n"
3456 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.
 *
 * NOTE(review): sampled listing -- the loop construct and several lines are
 * missing from this view. 'arg' is presumably the InputFile* stored into f
 * on a missing line -- confirm in full source.
 */
3463 static void *input_thread(void *arg)
3466 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3471 ret = av_read_frame(f->ctx, &pkt);
3473 if (ret == AVERROR(EAGAIN)) {
 /* on demux error/EOF, make the receiver side of the queue report it */
3478 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
 /* duplicate packet data so it survives after the demuxer reuses its buffer */
3481 av_dup_packet(&pkt);
3482 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* non-blocking send that would block: warn once, then retry blocking
  * (flags is presumably cleared on a missing line -- confirm) */
3483 if (flags && ret == AVERROR(EAGAIN)) {
3485 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3486 av_log(f->ctx, AV_LOG_WARNING,
3487 "Thread message queue blocking; consider raising the "
3488 "thread_queue_size option (current value: %d)\n",
3489 f->thread_queue_size);
3492 if (ret != AVERROR_EOF)
3493 av_log(f->ctx, AV_LOG_ERROR,
3494 "Unable to send packet to main thread: %s\n",
 /* undeliverable packet is freed here to avoid leaking its payload */
3496 av_free_packet(&pkt);
3497 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Tear down all input demuxer threads: signal EOF to each queue, drain and
 * free any packets still queued, join the thread, then free the queue.
 */
3505 static void free_input_threads(void)
3509 for (i = 0; i < nb_input_files; i++) {
3510 InputFile *f = input_files[i];
 /* skip files that never had a thread/queue (single-input case) */
3513 if (!f || !f->in_thread_queue)
3515 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
 /* drain remaining packets so their buffers are not leaked */
3516 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3517 av_free_packet(&pkt);
3519 pthread_join(f->thread, NULL);
3521 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Spawn one demuxer thread per input file (only when there is more than one
 * input). Non-seekable inputs other than lavfi get non-blocking queue sends.
 *
 * @return 0 on success (return path not visible here), negative AVERROR on
 *         allocation or thread-creation failure
 */
3525 static int init_input_threads(void)
 /* a single input needs no threading; packets are read inline */
3529 if (nb_input_files == 1)
3532 for (i = 0; i < nb_input_files; i++) {
3533 InputFile *f = input_files[i];
 /* non-seekable pb, or no pb and not the lavfi pseudo-demuxer =>
  * use non-blocking sends so a full queue does not stall the reader */
3535 if (f->ctx->pb ? !f->ctx->pb->seekable :
3536 strcmp(f->ctx->iformat->name, "lavfi"))
3537 f->non_blocking = 1;
3538 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3539 f->thread_queue_size, sizeof(AVPacket));
3543 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3544 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
 /* undo the queue allocation for this file before bailing out */
3545 av_thread_message_queue_free(&f->in_thread_queue);
 /* pthread_create returns a positive errno, so wrap it as AVERROR */
3546 return AVERROR(ret);
/*
 * Receive one packet from the input file's demuxer-thread queue.
 * The condition selecting non-blocking mode is on a missing line
 * (presumably f->non_blocking -- confirm in full source).
 */
3552 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3554 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3556 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Fetch the next packet for an input file, throttling realtime-rate inputs
 * (the -re path: the guard enabling this loop is on a missing line) and
 * dispatching to the threaded queue when multiple inputs are in use.
 */
3560 static int get_input_packet(InputFile *f, AVPacket *pkt)
 /* rate limiting: if any stream's dts is ahead of wallclock, ask the
  * caller to retry later instead of reading ahead of realtime */
3564 for (i = 0; i < f->nb_streams; i++) {
3565 InputStream *ist = input_streams[f->ist_index + i];
 /* dts is rescaled to microseconds to compare against av_gettime_relative() */
3566 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3567 int64_t now = av_gettime_relative() - ist->start;
3569 return AVERROR(EAGAIN);
 /* multiple inputs => packets come from the per-file demuxer thread */
3574 if (nb_input_files > 1)
3575 return get_input_packet_mt(f, pkt);
3577 return av_read_frame(f->ctx, pkt);
/*
 * Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input reported EAGAIN this iteration); return-value lines
 * are missing from this sampled view.
 */
3580 static int got_eagain(void)
3583 for (i = 0; i < nb_output_streams; i++)
3584 if (output_streams[i]->unavailable)
/*
 * Clear the per-iteration EAGAIN bookkeeping: every input file's eagain
 * flag and every output stream's unavailable flag.
 */
3589 static void reset_eagain(void)
3592 for (i = 0; i < nb_input_files; i++)
3593 input_files[i]->eagain = 0;
3594 for (i = 0; i < nb_output_streams; i++)
3595 output_streams[i]->unavailable = 0;
3600 * - 0 -- one packet was read and processed
3601 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3602 * this function should be called again
3603 * - AVERROR_EOF -- this function should not be called again
/*
 * NOTE(review): sampled listing -- many lines of this function are missing,
 * including the function's opening brace region and several closing braces.
 * Comments below annotate only the visible logic.
 */
3605 static int process_input(int file_index)
3607 InputFile *ifile = input_files[file_index];
3608 AVFormatContext *is;
3614 ret = get_input_packet(ifile, &pkt);
 /* EAGAIN: remember it on the file (flagging lines missing) and bail */
3616 if (ret == AVERROR(EAGAIN)) {
3621 if (ret != AVERROR_EOF) {
3622 print_error(is->filename, ret);
 /* --- EOF handling: flush decoders and finish directly-fed outputs --- */
3627 for (i = 0; i < ifile->nb_streams; i++) {
3628 ist = input_streams[ifile->ist_index + i];
3629 if (ist->decoding_needed) {
 /* NULL packet flushes the decoder for this stream */
3630 ret = process_input_packet(ist, NULL);
3635 /* mark all outputs that don't go through lavfi as finished */
3636 for (j = 0; j < nb_output_streams; j++) {
3637 OutputStream *ost = output_streams[j];
3639 if (ost->source_index == ifile->ist_index + i &&
3640 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3641 finish_output_stream(ost);
 /* EOF is reported as EAGAIN so the main loop re-evaluates which
  * input to service next */
3645 ifile->eof_reached = 1;
3646 return AVERROR(EAGAIN);
3652 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3653 is->streams[pkt.stream_index]);
3655 /* the following test is needed in case new streams appear
3656 dynamically in stream : we ignore them */
3657 if (pkt.stream_index >= ifile->nb_streams) {
3658 report_new_stream(file_index, &pkt);
3659 goto discard_packet;
3662 ist = input_streams[ifile->ist_index + pkt.stream_index];
3664 ist->data_size += pkt.size;
3668 goto discard_packet;
 /* verbose demuxer-side timestamp trace (enabled by debug_ts, guard missing) */
3671 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3672 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3673 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3674 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3675 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3676 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3677 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3678 av_ts2str(input_files[ist->file_index]->ts_offset),
3679 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
 /* --- timestamp wrap correction for streams with < 64 pts_wrap_bits --- */
3682 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3683 int64_t stime, stime2;
3684 // Correcting starttime based on the enabled streams
3685 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3686 // so we instead do it here as part of discontinuity handling
3687 if ( ist->next_dts == AV_NOPTS_VALUE
3688 && ifile->ts_offset == -is->start_time
3689 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3690 int64_t new_start_time = INT64_MAX;
 /* recompute start time from only the non-discarded streams */
3691 for (i=0; i<is->nb_streams; i++) {
3692 AVStream *st = is->streams[i];
3693 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3695 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3697 if (new_start_time > is->start_time) {
3698 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3699 ifile->ts_offset = -new_start_time;
3703 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3704 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3705 ist->wrap_correction_done = 1;
 /* a dts more than half a wrap period past start means it wrapped:
  * unwrap and keep correcting subsequent packets */
3707 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3708 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3709 ist->wrap_correction_done = 0;
3711 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3712 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3713 ist->wrap_correction_done = 0;
3717 /* add the stream-global side data to the first packet */
3718 if (ist->nb_packets == 1) {
3719 if (ist->st->nb_side_data)
3720 av_packet_split_side_data(&pkt);
3721 for (i = 0; i < ist->st->nb_side_data; i++) {
3722 AVPacketSideData *src_sd = &ist->st->side_data[i];
 /* don't duplicate side data the packet already carries */
3725 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
 /* autorotate consumes the display matrix elsewhere; skip copying it */
3727 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3730 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3734 memcpy(dst_data, src_sd->data, src_sd->size);
 /* apply the per-file timestamp offset, then the per-stream ts scale */
3738 if (pkt.dts != AV_NOPTS_VALUE)
3739 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3740 if (pkt.pts != AV_NOPTS_VALUE)
3741 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3743 if (pkt.pts != AV_NOPTS_VALUE)
3744 pkt.pts *= ist->ts_scale;
3745 if (pkt.dts != AV_NOPTS_VALUE)
3746 pkt.dts *= ist->ts_scale;
 /* --- inter-file discontinuity: first dts of this stream vs the file's
  *     last seen ts, only for formats flagged AVFMT_TS_DISCONT --- */
3748 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3749 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3750 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3751 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3752 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3753 int64_t delta = pkt_dts - ifile->last_ts;
3754 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3755 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3756 ifile->ts_offset -= delta;
3757 av_log(NULL, AV_LOG_DEBUG,
3758 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3759 delta, ifile->ts_offset);
3760 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3761 if (pkt.pts != AV_NOPTS_VALUE)
3762 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 /* --- intra-stream discontinuity: packet dts vs predicted next_dts --- */
3766 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3767 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3768 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3770 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3771 int64_t delta = pkt_dts - ist->next_dts;
3772 if (is->iformat->flags & AVFMT_TS_DISCONT) {
 /* large jump (or dts well behind current position): absorb it into
  * the file offset and shift this packet's timestamps back */
3773 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3774 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3775 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3776 ifile->ts_offset -= delta;
3777 av_log(NULL, AV_LOG_DEBUG,
3778 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3779 delta, ifile->ts_offset);
3780 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3781 if (pkt.pts != AV_NOPTS_VALUE)
3782 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 /* non-DISCONT formats: timestamps beyond the error threshold are
  * considered bogus and dropped rather than corrected */
3785 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3786 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3787 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3788 pkt.dts = AV_NOPTS_VALUE;
3790 if (pkt.pts != AV_NOPTS_VALUE){
3791 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3792 delta = pkt_pts - ist->next_dts;
3793 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3794 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3795 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3796 pkt.pts = AV_NOPTS_VALUE;
 /* remember the last dts seen on this file for inter-file correction */
3802 if (pkt.dts != AV_NOPTS_VALUE)
3803 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 /* post-correction timestamp trace (debug_ts guard missing from view) */
3806 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3807 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3808 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3809 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3810 av_ts2str(input_files[ist->file_index]->ts_offset),
3811 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3814 sub2video_heartbeat(ist, pkt.pts);
3816 process_input_packet(ist, &pkt);
3819 av_free_packet(&pkt);
3825 * Perform a step of transcoding for the specified filter graph.
3827 * @param[in] graph filter graph to consider
3828 * @param[out] best_ist input stream where a frame would allow to continue
3829 * @return 0 for success, <0 for error
/*
 * NOTE(review): sampled listing -- lines missing; best_ist is presumably
 * assigned inside the request-counting loop below (assignment not visible).
 */
3831 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3834 int nb_requests, nb_requests_max = 0;
3835 InputFilter *ifilter;
 /* ask the graph to produce output on its oldest sink first */
3839 ret = avfilter_graph_request_oldest(graph->graph);
3841 return reap_filters(0);
 /* graph is fully flushed: drain outputs and close the fed streams */
3843 if (ret == AVERROR_EOF) {
3844 ret = reap_filters(1);
3845 for (i = 0; i < graph->nb_outputs; i++)
3846 close_output_stream(graph->outputs[i]->ost);
3849 if (ret != AVERROR(EAGAIN))
 /* EAGAIN: find the input whose buffersrc starved the graph the most */
3852 for (i = 0; i < graph->nb_inputs; i++) {
3853 ifilter = graph->inputs[i];
 /* inputs that are stalled or exhausted cannot help; skip them */
3855 if (input_files[ist->file_index]->eagain ||
3856 input_files[ist->file_index]->eof_reached)
3858 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3859 if (nb_requests > nb_requests_max) {
3860 nb_requests_max = nb_requests;
 /* no usable input found (condition line missing): mark the graph's
  * outputs unavailable for this iteration */
3866 for (i = 0; i < graph->nb_outputs; i++)
3867 graph->outputs[i]->ost->unavailable = 1;
3873 * Run a single step of transcoding.
3875 * @return 0 for success, <0 for error
/*
 * NOTE(review): sampled listing -- the declarations of ist/ost/ret and
 * several guards are missing from this view.
 */
3877 static int transcode_step(void)
3883 ost = choose_output();
 /* no output selectable and nothing pending (guard missing): we are done */
3890 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
 /* filtered outputs: let the filter graph pick which input to feed */
3895 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
 /* direct (non-filter) outputs must have a source input stream */
3900 av_assert0(ost->source_index >= 0);
3901 ist = input_streams[ost->source_index];
3904 ret = process_input(ist->file_index);
3905 if (ret == AVERROR(EAGAIN)) {
3906 if (input_files[ist->file_index]->eagain)
3907 ost->unavailable = 1;
 /* EOF from process_input is not an error at this level */
3912 return ret == AVERROR_EOF ? 0 : ret;
3914 return reap_filters(0);
3918 * The following code is the main loop of the file converter
/*
 * NOTE(review): sampled listing -- declarations, error labels (fail:) and
 * several closing braces are missing; comments annotate visible logic only.
 *
 * Overall flow: init -> per-iteration keyboard check / transcode_step /
 * progress report -> flush decoders -> write trailers -> close encoders and
 * decoders -> free per-output-stream resources.
 */
3920 static int transcode(void)
3923 AVFormatContext *os;
3926 int64_t timer_start;
3928 ret = transcode_init();
3932 if (stdin_interaction) {
3933 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3936 timer_start = av_gettime_relative();
 /* guarded by HAVE_PTHREADS in the full source -- confirm */
3939 if ((ret = init_input_threads()) < 0)
 /* ---- main loop: runs until SIGTERM, 'q', no more output, or error ---- */
3943 while (!received_sigterm) {
3944 int64_t cur_time= av_gettime_relative();
3946 /* if 'q' pressed, exits */
3947 if (stdin_interaction)
3948 if (check_keyboard_interaction(cur_time) < 0)
3951 /* check if there's any stream where output is still needed */
3952 if (!need_output()) {
3953 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3957 ret = transcode_step();
 /* EOF/EAGAIN from a step are expected control-flow, not failures */
3959 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3963 av_strerror(ret, errbuf, sizeof(errbuf));
3965 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3970 /* dump report by using the output first video and audio streams */
3971 print_report(0, timer_start, cur_time);
3974 free_input_threads();
3977 /* at the end of stream, we must flush the decoder buffers */
3978 for (i = 0; i < nb_input_streams; i++) {
3979 ist = input_streams[i];
3980 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3981 process_input_packet(ist, NULL);
3988 /* write the trailer if needed and close file */
3989 for (i = 0; i < nb_output_files; i++) {
3990 os = output_files[i]->ctx;
3991 av_write_trailer(os);
3994 /* dump report by using the first video and audio streams */
3995 print_report(1, timer_start, av_gettime_relative());
3997 /* close each encoder */
3998 for (i = 0; i < nb_output_streams; i++) {
3999 ost = output_streams[i];
4000 if (ost->encoding_needed) {
4001 av_freep(&ost->enc_ctx->stats_in);
4005 /* close each decoder */
4006 for (i = 0; i < nb_input_streams; i++) {
4007 ist = input_streams[i];
4008 if (ist->decoding_needed) {
4009 avcodec_close(ist->dec_ctx);
 /* release hwaccel state if a hardware decoder was in use */
4010 if (ist->hwaccel_uninit)
4011 ist->hwaccel_uninit(ist->dec_ctx);
 /* cleanup path (label missing from view): also reached on error */
4020 free_input_threads();
4023 if (output_streams) {
4024 for (i = 0; i < nb_output_streams; i++) {
4025 ost = output_streams[i];
 /* close the 2-pass stats logfile if one was open (guard missing) */
4028 fclose(ost->logfile);
4029 ost->logfile = NULL;
4031 av_freep(&ost->forced_kf_pts);
4032 av_freep(&ost->apad);
4033 av_freep(&ost->disposition);
4034 av_dict_free(&ost->encoder_opts);
4035 av_dict_free(&ost->sws_dict);
4036 av_dict_free(&ost->swr_opts);
4037 av_dict_free(&ost->resample_opts);
4038 av_dict_free(&ost->bsf_args);
/*
 * Return the process's consumed user CPU time in microseconds, using the
 * best facility available: getrusage (POSIX), GetProcessTimes (Windows),
 * else wallclock via av_gettime_relative() as a rough fallback.
 */
4046 static int64_t getutime(void)
4049 struct rusage rusage;
4051 getrusage(RUSAGE_SELF, &rusage);
4052 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4053 #elif HAVE_GETPROCESSTIMES
4055 FILETIME c, e, k, u;
4056 proc = GetCurrentProcess();
4057 GetProcessTimes(proc, &c, &e, &k, &u);
 /* FILETIME is in 100ns units; combine halves and divide by 10 for us */
4058 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4060 return av_gettime_relative();
/*
 * Return the process's peak memory usage in bytes: ru_maxrss (kilobytes,
 * hence the *1024) via getrusage where available, PeakPagefileUsage via
 * GetProcessMemoryInfo on Windows; fallback branch not visible here.
 */
4064 static int64_t getmaxrss(void)
4066 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4067 struct rusage rusage;
4068 getrusage(RUSAGE_SELF, &rusage);
4069 return (int64_t)rusage.ru_maxrss * 1024;
4070 #elif HAVE_GETPROCESSMEMORYINFO
4072 PROCESS_MEMORY_COUNTERS memcounters;
4073 proc = GetCurrentProcess();
4074 memcounters.cb = sizeof(memcounters);
4075 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4076 return memcounters.PeakPagefileUsage;
/* No-op log callback installed to silence libav* logging (used by the -d
 * daemon mode below); body lines are missing from this sampled view. */
4082 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4086 int main(int argc, char **argv)
4091 register_exit(ffmpeg_cleanup);
4093 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4095 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4096 parse_loglevel(argc, argv, options);
4098 if(argc>1 && !strcmp(argv[1], "-d")){
4100 av_log_set_callback(log_callback_null);
4105 avcodec_register_all();
4107 avdevice_register_all();
4109 avfilter_register_all();
4111 avformat_network_init();
4113 show_banner(argc, argv, options);
4117 /* parse options and open all input/output files */
4118 ret = ffmpeg_parse_options(argc, argv);
4122 if (nb_output_files <= 0 && nb_input_files == 0) {
4124 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4128 /* file converter / grab */
4129 if (nb_output_files <= 0) {
4130 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4134 // if (nb_input_files == 0) {
4135 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4139 current_time = ti = getutime();
4140 if (transcode() < 0)
4142 ti = getutime() - ti;
4144 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4146 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4147 decode_error_stat[0], decode_error_stat[1]);
4148 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4151 exit_program(received_nb_signals ? 255 : main_return_code);
4152 return main_return_code;