2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avcodec.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
115 static FILE *vstats_file;
117 const char *const forced_keyframes_const_names[] = {
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Prepare a blank RGB32 canvas in ist->sub2video.frame for rendering bitmap
 * subtitles. Dimensions come from the decoder when known, otherwise from the
 * configured sub2video fallback size. Returns 0 on success, a negative
 * AVERROR from av_frame_get_buffer() on failure.
 */
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before (re)allocating at the current size. */
174 av_frame_unref(frame);
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffers. */
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero the canvas: RGB32 zero is fully transparent black. */
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas of size w x h; r is the rectangle.
 * Non-bitmap rects and rects overflowing the canvas are warned about and
 * skipped (the elided lines presumably return early — TODO confirm).
 */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel; 4 bytes per RGB32 pixel. */
202 dst += r->y * dst_linesize + r->x * 4;
203 src = r->pict.data[0];
/* data[1] holds the RGB32 palette for the 8-bit indices in data[0]. */
204 pal = (uint32_t *)r->pict.data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
/* Expand each palette index to its 32-bit color. */
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->pict.linesize[0];
/*
 * Stamp the sub2video canvas with pts and push it (by reference) into every
 * filtergraph input fed by this stream. KEEP_REF lets the same frame be sent
 * again later by the heartbeat; PUSH forces immediate filtering.
 */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
/*
 * Render an AVSubtitle onto a fresh blank canvas and push it into the filter
 * graphs. A NULL sub clears the canvas (used by the heartbeat and at flush);
 * in that case the elided branch presumably uses sub2video.end_pts as the
 * timestamp — TODO confirm against the full source.
 */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* Subtitle display times are in ms relative to sub->pts (AV_TIME_BASE units);
 * convert both edges into the stream time base. */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
/* Blit every rectangle, then hand the finished canvas to the filters. */
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/*
 * Called when a packet is read from the file containing ist: re-send the
 * current sub2video frame to every sub2video stream of the same file so that
 * filters waiting on a subtitle input (e.g. overlay) do not stall while
 * video frames pile up. pts is in ist's stream time base.
 */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired (or never rendered): refresh with a clear/blank
 * update before re-pushing. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
/* Only re-push if some filter actually failed a frame request. */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/*
 * End-of-stream handling for a sub2video stream: push one final clearing
 * update (unless the canvas already extends to INT64_MAX) and then signal
 * EOF to every connected buffersrc by sending a NULL frame.
 */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/*
 * Restore the terminal to its saved state. Must stay async-signal-safe:
 * it is invoked from the signal handler path, so only tcsetattr() and a
 * no-op av_log() flush are used here.
 */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between signal handlers and the main
 * loop; volatile because they are written from async signal context.
 * NOTE(review): plain volatile int is the historical FFmpeg idiom here,
 * not C11 atomics. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/*
 * Async-signal-safe handler for SIGINT/SIGTERM/etc.: records which signal
 * arrived and how many times. After more than 3 signals the user clearly
 * wants out, so a message is written with raw write(2) (printf is not
 * signal-safe) before the hard exit in the elided lines.
 */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands via sigterm_handler().
 */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like an interactive interrupt. */
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* --- interior of term_init() (function header elided in this excerpt) ---
 * Puts the controlling terminal into a raw-ish mode so single keypresses can
 * be read by read_key(), then installs the signal handlers. */
376 istty = isatty(0) && isatty(2);
378 if (istty && tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keys arrive unmodified. */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU time limit exceeded (e.g. -timelimit) is treated as a terminate too. */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/*
 * Returns the next pending keypress without blocking, using select() on
 * POSIX and PeekNamedPipe()/console APIs on Windows. The elided lines
 * presumably return -1 when no input is available — TODO confirm.
 */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* If GetConsoleMode fails, stdin is a pipe rather than a console. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/*
 * Interrupt callback handed to libavformat blocking I/O: abort when a signal
 * has arrived. Before transcode init completes (transcode_init_done == 0) a
 * single signal aborts; afterwards it takes more than one, giving the normal
 * shutdown path a chance to run first.
 */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > transcode_init_done;
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown, registered to run at exit: frees filter graphs, output
 * files/streams, input files/streams and all per-stream allocations, then
 * reports how the run ended. ret is the process exit status being used.
 */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free every filtergraph, including its per-input/output name strings. */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 av_freep(&fg->inputs[j]->name);
480 av_freep(&fg->inputs[j]);
482 av_freep(&fg->inputs);
483 for (j = 0; j < fg->nb_outputs; j++) {
484 av_freep(&fg->outputs[j]->name);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
/* Close output muxers. avio close of the file handle happens in the elided
 * branch guarded by the AVFMT_NOFILE check. */
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
/* Per-output-stream teardown: bitstream filter chain, cached frames,
 * parser, keyframe expression, encoder context. */
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
512 AVBitStreamFilterContext *bsfc;
517 bsfc = ost->bitstream_filters;
519 AVBitStreamFilterContext *next = bsfc->next;
520 av_bitstream_filter_close(bsfc);
523 ost->bitstream_filters = NULL;
524 av_frame_free(&ost->filtered_frame);
525 av_frame_free(&ost->last_frame);
527 av_parser_close(ost->parser);
529 av_freep(&ost->forced_keyframes);
530 av_expr_free(ost->forced_keyframes_pexpr);
531 av_freep(&ost->avfilter);
532 av_freep(&ost->logfile_prefix);
534 av_freep(&ost->audio_channels_map);
535 ost->audio_channels_mapped = 0;
537 avcodec_free_context(&ost->enc_ctx);
539 av_freep(&output_streams[i]);
/* Stop reader threads before closing the inputs they read from. */
542 free_input_threads();
544 for (i = 0; i < nb_input_files; i++) {
545 avformat_close_input(&input_files[i]->ctx);
546 av_freep(&input_files[i]);
548 for (i = 0; i < nb_input_streams; i++) {
549 InputStream *ist = input_streams[i];
551 av_frame_free(&ist->decoded_frame);
552 av_frame_free(&ist->filter_frame);
553 av_dict_free(&ist->decoder_opts);
554 avsubtitle_free(&ist->prev_sub.subtitle);
555 av_frame_free(&ist->sub2video.frame);
556 av_freep(&ist->filters);
557 av_freep(&ist->hwaccel_device);
559 avcodec_free_context(&ist->dec_ctx);
561 av_freep(&input_streams[i]);
566 av_freep(&vstats_filename);
/* Finally drop the stream/file tables themselves. */
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
/* Report outcome: a signal exit is "normal"; a nonzero ret after init
 * started means the conversion itself failed. */
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/*
 * Remove from dictionary *a every key that also appears in b (case-
 * sensitive). Used to strip options that were consumed by one component
 * before checking for leftovers with assert_avoptions().
 */
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Abort with a fatal log if any option in m was not consumed — i.e. the
 * user supplied an option no component recognized. The elided line
 * presumably calls exit_program() — TODO confirm.
 */
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-exit helper for codecs that require -strict experimental. */
605 static void abort_codec_experimental(AVCodec *c, int encoder)
/*
 * With -benchmark_all, log the CPU time consumed since the previous call,
 * labelled by the printf-style fmt. A NULL fmt (handled in elided lines)
 * only resets the reference time without logging.
 */
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/*
 * Mark every output stream finished: ost itself gets this_stream, all other
 * streams get others. Used when a muxer error must stop the whole output.
 */
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Final packet path to the muxer: copy encoder extradata to the muxing
 * context if needed, enforce -frames limits, collect quality/error side
 * data, run the bitstream-filter chain, sanitize pts/dts monotonicity and
 * hand the packet to av_interleaved_write_frame(). On mux failure all
 * streams are closed and the process return code is set to 1.
 */
636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* For stream copy the muxing codec context is the authority; when encoding
 * it is the encoder context. */
639 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate encoder extradata to the muxer-side context once available. */
642 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
643 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
644 if (ost->st->codec->extradata) {
645 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
646 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: discard timestamps entirely. */
650 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
651 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
652 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
655 * Audio encoders may split the packets -- #frames in != #packets out.
656 * But there is no reordering, so we can limit the number of output packets
657 * by simply dropping them here.
658 * Counting encoded video frames needs to be done separately because of
659 * reordering, see do_video_out()
661 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662 if (ost->frame_number >= ost->max_frames) {
/* Pull quality/picture-type/error stats out of packet side data for the
 * status line and -vstats reporting. */
668 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
670 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
672 ost->quality = sd ? AV_RL32(sd) : -1;
673 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
675 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
677 ost->error[i] = AV_RL64(sd + 8 + 8*i);
684 av_packet_split_side_data(pkt);
/* Run each legacy bitstream filter in the chain over the packet. */
687 AVPacket new_pkt = *pkt;
688 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
691 int a = av_bitstream_filter_filter(bsfc, avctx,
692 bsf_arg ? bsf_arg->value : NULL,
693 &new_pkt.data, &new_pkt.size,
694 pkt->data, pkt->size,
695 pkt->flags & AV_PKT_FLAG_KEY);
696 FF_DISABLE_DEPRECATION_WARNINGS
/* a == 0 with a new data pointer means the filter returned a reference
 * into memory it owns: copy it so the packet owns its payload. */
697 if(a == 0 && new_pkt.data != pkt->data
698 #if FF_API_DESTRUCT_PACKET
702 FF_ENABLE_DEPRECATION_WARNINGS
703 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
705 memcpy(t, new_pkt.data, new_pkt.size);
706 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
714 pkt->side_data = NULL;
715 pkt->side_data_elems = 0;
/* Wrap the (possibly new) buffer in a refcounted AVBuffer. */
717 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
718 av_buffer_default_free, NULL, 0);
723 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
724 bsfc->filter->name, pkt->stream_index,
725 avctx->codec ? avctx->codec->name : "copy");
/* Timestamp sanitation for muxers that require timestamps. */
735 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
736 if (pkt->dts != AV_NOPTS_VALUE &&
737 pkt->pts != AV_NOPTS_VALUE &&
738 pkt->dts > pkt->pts) {
739 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
741 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}: the sum minus
 * min minus max leaves the middle value. */
743 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
744 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
745 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing dts. */
748 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
749 pkt->dts != AV_NOPTS_VALUE &&
750 ost->last_mux_dts != AV_NOPTS_VALUE) {
751 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
752 if (pkt->dts < max) {
753 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
754 av_log(s, loglevel, "Non-monotonous DTS in output stream "
755 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
756 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* exit_on_error path (guard elided): abort instead of patching. */
758 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
761 av_log(s, loglevel, "changing to %"PRId64". This may result "
762 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to max. */
764 if(pkt->pts >= pkt->dts)
765 pkt->pts = FFMAX(pkt->pts, max);
770 ost->last_mux_dts = pkt->dts;
/* Byte/packet accounting for the final report. */
772 ost->data_size += pkt->size;
773 ost->packets_written++;
775 pkt->stream_index = ost->index;
/* -debug_ts trace of what is handed to the muxer. */
778 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
779 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
780 av_get_media_type_string(ost->enc_ctx->codec_type),
781 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
782 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
787 ret = av_interleaved_write_frame(s, pkt);
789 print_error("av_interleaved_write_frame()", ret);
790 main_return_code = 1;
791 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/*
 * Mark an output stream's encoder finished and, when applicable, shrink the
 * file's recording_time so the remaining streams stop at the same point.
 */
796 static void close_output_stream(OutputStream *ost)
798 OutputFile *of = output_files[ost->file_index];
800 ost->finished |= ENCODER_FINISHED;
/* Convert the stream's current position to AV_TIME_BASE units. */
802 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
803 of->recording_time = FFMIN(of->recording_time, end);
/*
 * Returns 0 and closes the stream when the -t recording limit has been
 * reached for ost's file; otherwise (elided) returns 1 so encoding of the
 * current frame may proceed.
 */
807 static int check_recording_time(OutputStream *ost)
809 OutputFile *of = output_files[ost->file_index];
811 if (of->recording_time != INT64_MAX &&
812 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
813 AV_TIME_BASE_Q) >= 0) {
814 close_output_stream(ost);
/*
 * Encode one audio frame and send the resulting packet(s) to write_frame().
 * Maintains ost->sync_opts in samples so timestamps stay continuous.
 */
820 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
823 AVCodecContext *enc = ost->enc_ctx;
827 av_init_packet(&pkt);
/* Stop if the -t limit was reached. */
831 if (!check_recording_time(ost))
/* Without a usable pts (or with negative -async) generate timestamps from
 * the running sample counter. */
834 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
835 frame->pts = ost->sync_opts;
836 ost->sync_opts = frame->pts + frame->nb_samples;
837 ost->samples_encoded += frame->nb_samples;
838 ost->frames_encoded++;
840 av_assert0(pkt.size || !pkt.data);
841 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
843 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
844 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
845 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
846 enc->time_base.num, enc->time_base.den);
849 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
850 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
853 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Packet timestamps come back in encoder time base; convert for the muxer. */
856 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
859 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
860 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
861 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
862 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
865 write_frame(s, &pkt, ost);
/*
 * Encode one AVSubtitle and mux the result. DVB subtitles are emitted as
 * two packets (one to draw, one to erase), hence the nb loop; other codecs
 * emit one. Timestamps are normalized so start_display_time is 0.
 */
869 static void do_subtitle_out(AVFormatContext *s,
/* 1 MiB upper bound for a single encoded subtitle packet. */
874 int subtitle_out_max_size = 1024 * 1024;
875 int subtitle_out_size, nb, i;
880 if (sub->pts == AV_NOPTS_VALUE) {
881 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared output buffer (freed in ffmpeg_cleanup). */
890 subtitle_out = av_malloc(subtitle_out_max_size);
892 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
897 /* Note: DVB subtitle need one packet to draw them and one other
898 packet to clear them */
899 /* XXX: signal it in the codec context ? */
900 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
905 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
907 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
908 pts -= output_files[ost->file_index]->start_time;
909 for (i = 0; i < nb; i++) {
/* avcodec_encode_subtitle() may clobber num_rects (for the "clear"
 * packet); save it so the caller's struct stays intact. */
910 unsigned save_num_rects = sub->num_rects;
912 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
913 if (!check_recording_time(ost))
917 // start_display_time is required to be 0
918 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
919 sub->end_display_time -= sub->start_display_time;
920 sub->start_display_time = 0;
924 ost->frames_encoded++;
926 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
927 subtitle_out_max_size, sub);
929 sub->num_rects = save_num_rects;
930 if (subtitle_out_size < 0) {
931 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
935 av_init_packet(&pkt);
936 pkt.data = subtitle_out;
937 pkt.size = subtitle_out_size;
938 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
939 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
940 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
941 /* XXX: the pts correction is handled here. Maybe handling
942 it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond of display time. */
944 pkt.pts += 90 * sub->start_display_time;
946 pkt.pts += 90 * sub->end_display_time;
949 write_frame(s, &pkt, ost);
/*
 * Core video output path: decide (per the active -vsync mode) how many
 * copies of next_picture to emit (nb_frames) and how many of those should
 * repeat the PREVIOUS frame (nb0_frames), then encode — or, for rawvideo
 * with AVFMT_RAWPICTURE, pass the picture through — and mux each one.
 * A NULL next_picture is used when flushing (see reap_filters()).
 */
953 static void do_video_out(AVFormatContext *s,
955 AVFrame *next_picture,
958 int ret, format_video_sync;
960 AVCodecContext *enc = ost->enc_ctx;
961 AVCodecContext *mux_enc = ost->st->codec;
962 int nb_frames, nb0_frames, i;
/* delta0: gap between the frame's ideal position and the clock;
 * delta:  same gap measured at the END of the frame (+duration). */
963 double delta, delta0;
966 InputStream *ist = NULL;
967 AVFilterContext *filter = ost->filter->filter;
969 if (ost->source_index >= 0)
970 ist = input_streams[ost->source_index];
/* Derive the frame duration (in encoder ticks) from the filter output
 * frame rate when available. */
972 if (filter->inputs[0]->frame_rate.num > 0 &&
973 filter->inputs[0]->frame_rate.den > 0)
974 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
976 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
977 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Trust the source packet duration when no filters could have changed it. */
979 if (!ost->filters_script &&
983 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
984 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path (next_picture == NULL, guard elided): estimate counts from the
 * median of the last three nb0 values. */
989 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
990 ost->last_nb0_frames[1],
991 ost->last_nb0_frames[2]);
993 delta0 = sync_ipts - ost->sync_opts;
994 delta = delta0 + duration;
996 /* by default, we output a single frame */
/* Resolve VSYNC_AUTO into a concrete mode based on the muxer. */
1000 format_video_sync = video_sync_method;
1001 if (format_video_sync == VSYNC_AUTO) {
1002 if(!strcmp(s->oformat->name, "avi")) {
1003 format_video_sync = VSYNC_VFR;
1005 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1007 && format_video_sync == VSYNC_CFR
1008 && input_files[ist->file_index]->ctx->nb_streams == 1
1009 && input_files[ist->file_index]->input_ts_offset == 0) {
1010 format_video_sync = VSYNC_VSCFR;
1012 if (format_video_sync == VSYNC_CFR && copy_ts) {
1013 format_video_sync = VSYNC_VSCFR;
/* A frame arriving "in the past" (delta0 < 0): shorten it rather than
 * emitting a negative gap, except in passthrough/drop modes. */
1019 format_video_sync != VSYNC_PASSTHROUGH &&
1020 format_video_sync != VSYNC_DROP) {
1021 double cor = FFMIN(-delta0, duration);
1022 if (delta0 < -0.6) {
1023 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1025 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1031 switch (format_video_sync) {
/* VSCFR: skip initial duplicates, then behave like CFR from sync_ipts. */
1033 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1034 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1037 ost->sync_opts = lrint(sync_ipts);
/* CFR: drop or duplicate to keep the output clock locked. */
1040 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1041 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1043 } else if (delta < -1.1)
1045 else if (delta > 1.1) {
1046 nb_frames = lrintf(delta);
/* Some of the duplicates should repeat the previous frame when the gap
 * opened before this frame's own position. */
1048 nb0_frames = lrintf(delta0 - 0.6);
/* VFR: emit at most one frame; drop if it lands before the clock. */
1054 else if (delta > 0.6)
1055 ost->sync_opts = lrint(sync_ipts);
1058 case VSYNC_PASSTHROUGH:
1059 ost->sync_opts = lrint(sync_ipts);
/* Clamp to the -frames limit and keep nb0 consistent. */
1066 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1067 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the nb0 history (used by the flush-time mid_pred above). */
1069 memmove(ost->last_nb0_frames + 1,
1070 ost->last_nb0_frames,
1071 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1072 ost->last_nb0_frames[0] = nb0_frames;
1074 if (nb0_frames == 0 && ost->last_droped) {
1076 av_log(NULL, AV_LOG_VERBOSE,
1077 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1078 ost->frame_number, ost->st->index, ost->last_frame->pts);
1080 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
/* Runaway duplication usually indicates broken timestamps: bail out. */
1081 if (nb_frames > dts_error_threshold * 30) {
1082 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1086 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1087 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1089 ost->last_droped = nb_frames == nb0_frames && next_picture;
1091 /* duplicates frame if needed */
1092 for (i = 0; i < nb_frames; i++) {
1093 AVFrame *in_picture;
1094 av_init_packet(&pkt);
/* The first nb0_frames iterations re-send the previous frame. */
1098 if (i < nb0_frames && ost->last_frame) {
1099 in_picture = ost->last_frame;
1101 in_picture = next_picture;
1106 in_picture->pts = ost->sync_opts;
1109 if (!check_recording_time(ost))
1111 if (ost->frame_number >= ost->max_frames)
/* Rawvideo shortcut: the AVPicture itself is the "packet" payload. */
1115 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1116 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1117 /* raw pictures are written as AVPicture structure to
1118 avoid any copies. We support temporarily the older
1120 if (in_picture->interlaced_frame)
1121 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1123 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124 pkt.data = (uint8_t *)in_picture;
1125 pkt.size = sizeof(AVPicture);
1126 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1127 pkt.flags |= AV_PKT_FLAG_KEY;
1129 write_frame(s, &pkt, ost);
1131 int got_packet, forced_keyframe = 0;
/* Honor -top when interlaced encoding is requested. */
1134 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1135 ost->top_field_first >= 0)
1136 in_picture->top_field_first = !!ost->top_field_first;
1138 if (in_picture->interlaced_frame) {
/* MJPEG signals field order differently (TT/BB vs TB/BT). */
1139 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1140 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1142 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1144 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1146 in_picture->quality = enc->global_quality;
1147 in_picture->pict_type = 0;
1149 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1150 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* Forced keyframes: explicit pts list, expression, or "source". */
1151 if (ost->forced_kf_index < ost->forced_kf_count &&
1152 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1153 ost->forced_kf_index++;
1154 forced_keyframe = 1;
1155 } else if (ost->forced_keyframes_pexpr) {
1157 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1158 res = av_expr_eval(ost->forced_keyframes_pexpr,
1159 ost->forced_keyframes_expr_const_values, NULL);
1160 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1161 ost->forced_keyframes_expr_const_values[FKF_N],
1162 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1163 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1164 ost->forced_keyframes_expr_const_values[FKF_T],
1165 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1168 forced_keyframe = 1;
1169 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1170 ost->forced_keyframes_expr_const_values[FKF_N];
1171 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1172 ost->forced_keyframes_expr_const_values[FKF_T];
1173 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1176 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1177 } else if ( ost->forced_keyframes
1178 && !strncmp(ost->forced_keyframes, "source", 6)
1179 && in_picture->key_frame==1) {
1180 forced_keyframe = 1;
1183 if (forced_keyframe) {
1184 in_picture->pict_type = AV_PICTURE_TYPE_I;
1185 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1188 update_benchmark(NULL);
1190 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1191 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1192 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1193 enc->time_base.num, enc->time_base.den);
1196 ost->frames_encoded++;
1198 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1199 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1201 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* -debug_ts trace before rescaling (encoder time base)... */
1207 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1208 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1209 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1210 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* No-delay encoders may omit pts; substitute the sync counter. */
1213 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1214 pkt.pts = ost->sync_opts;
1216 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* ...and after rescaling (stream time base). */
1219 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1220 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1221 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1222 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1225 frame_size = pkt.size;
1226 write_frame(s, &pkt, ost);
1228 /* if two pass, output log */
1229 if (ost->logfile && enc->stats_out) {
1230 fprintf(ost->logfile, "%s", enc->stats_out);
1236 * For video, number of frames in == number of packets out.
1237 * But there may be reordering, so we can't throw away frames on encoder
1238 * flush, we need to limit them here, before they go into encoder.
1240 ost->frame_number++;
1242 if (vstats_filename && frame_size)
1243 do_video_stats(ost, frame_size);
/* Remember this frame so nb0 duplicates can reuse it next call. */
1246 if (!ost->last_frame)
1247 ost->last_frame = av_frame_alloc();
1248 av_frame_unref(ost->last_frame);
1249 if (next_picture && ost->last_frame)
1250 av_frame_ref(ost->last_frame, next_picture);
1252 av_frame_free(&ost->last_frame);
/* Convert a normalized mean-squared error d into PSNR in dB. */
1255 static double psnr(double d)
1257 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * sizes, bitrates, picture type) to the -vstats file for this video stream.
 */
1260 static void do_video_stats(OutputStream *ost, int frame_size)
1262 AVCodecContext *enc;
1264 double ti1, bitrate, avg_bitrate;
1266 /* this is executed just the first time do_video_stats is called */
1268 vstats_file = fopen(vstats_filename, "w");
1276 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1277 frame_number = ost->st->nb_frames;
/* ost->quality is stored in lambda units; convert back to a qscale. */
1278 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1279 ost->quality / (float)FF_QP2LAMBDA);
1281 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1282 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1284 fprintf(vstats_file,"f_size= %6d ", frame_size);
1285 /* compute pts value */
1286 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate of this frame vs. running average over ti1. */
1290 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1291 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1292 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1293 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1294 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark ost fully finished (encoder and muxer). The loop below additionally
 * finishes every stream of the same output file; presumably guarded by a
 * shortest/abort condition in the elided lines — TODO confirm.
 */
1298 static void finish_output_stream(OutputStream *ost)
1300 OutputFile *of = output_files[ost->file_index];
1303 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1306 for (i = 0; i < of->ctx->nb_streams; i++)
1307 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1312 * Get and encode new output from any of the filtergraphs, without causing
1315 * @return 0 for success, <0 for severe errors
/* NOTE(review): transient sink conditions (EAGAIN, and EOF when not
 * flushing) are not considered severe and do not abort transcoding. */
1317 static int reap_filters(int flush)
1319 AVFrame *filtered_frame = NULL;
1322 /* Reap all buffers present in the buffer sinks */
1323 for (i = 0; i < nb_output_streams; i++) {
1324 OutputStream *ost = output_streams[i];
1325 OutputFile *of = output_files[ost->file_index];
1326 AVFilterContext *filter;
1327 AVCodecContext *enc = ost->enc_ctx;
1332 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive sink output */
1334 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1335 return AVERROR(ENOMEM);
1337 filtered_frame = ost->filtered_frame;
1340 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means "only take what is already queued" */
1341 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1342 AV_BUFFERSINK_FLAG_NO_REQUEST);
1344 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1345 av_log(NULL, AV_LOG_WARNING,
1346 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1347 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during a flush, push a NULL frame so the video path can drain */
1348 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1349 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1353 if (ost->finished) {
1354 av_frame_unref(filtered_frame);
/* rescale the sink pts into the encoder time base; float_pts keeps extra
 * fractional precision by shifting tb.den left before the rescale */
1357 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1358 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1359 AVRational tb = enc->time_base;
1360 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1362 tb.den <<= extra_bits;
1364 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1365 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1366 float_pts /= 1 << extra_bits;
1367 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1368 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1370 filtered_frame->pts =
1371 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1372 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1374 //if (ost->source_index >= 0)
1375 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame to the media-type-specific output path */
1377 switch (filter->inputs[0]->type) {
1378 case AVMEDIA_TYPE_VIDEO:
1379 if (!ost->frame_aspect_ratio.num)
1380 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1383 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1384 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1386 enc->time_base.num, enc->time_base.den);
1389 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1391 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized audio when the encoder cannot adapt mid-stream */
1392 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1393 enc->channels != av_frame_get_channels(filtered_frame)) {
1394 av_log(NULL, AV_LOG_ERROR,
1395 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1398 do_audio_out(of->ctx, ost, filtered_frame);
1401 // TODO support subtitle filters
1405 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * (at verbose level) per-input/per-output stream packet statistics.
 * total_size is the final size of the first output file in bytes. */
1412 static void print_final_stats(int64_t total_size)
1414 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1415 uint64_t subtitle_size = 0;
1416 uint64_t data_size = 0;
1417 float percent = -1.0;
/* accumulate per-codec-type sizes over all output streams */
1421 for (i = 0; i < nb_output_streams; i++) {
1422 OutputStream *ost = output_streams[i];
1423 switch (ost->enc_ctx->codec_type) {
1424 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1425 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1426 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1427 default: other_size += ost->data_size; break;
1429 extra_size += ost->enc_ctx->extradata_size;
1430 data_size += ost->data_size;
/* NOTE(review): mixes new-style AV_CODEC_FLAG_PASS1 with old-style
 * CODEC_FLAG_PASS2 — same bit values, but worth unifying */
1431 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1432 != AV_CODEC_FLAG_PASS1)
/* muxing overhead as a percentage of the payload size; -1 = unknown */
1436 if (data_size && total_size>0 && total_size >= data_size)
1437 percent = 100.0 * (total_size - data_size) / data_size;
1439 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1440 video_size / 1024.0,
1441 audio_size / 1024.0,
1442 subtitle_size / 1024.0,
1443 other_size / 1024.0,
1444 extra_size / 1024.0);
1446 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1448 av_log(NULL, AV_LOG_INFO, "unknown");
1449 av_log(NULL, AV_LOG_INFO, "\n");
1451 /* print verbose per-stream stats */
1452 for (i = 0; i < nb_input_files; i++) {
1453 InputFile *f = input_files[i];
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1457 i, f->ctx->filename);
1459 for (j = 0; j < f->nb_streams; j++) {
1460 InputStream *ist = input_streams[f->ist_index + j];
1461 enum AVMediaType type = ist->dec_ctx->codec_type;
1463 total_size += ist->data_size;
1464 total_packets += ist->nb_packets;
1466 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1469 ist->nb_packets, ist->data_size);
1471 if (ist->decoding_needed) {
1472 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1473 ist->frames_decoded);
1474 if (type == AVMEDIA_TYPE_AUDIO)
1475 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1476 av_log(NULL, AV_LOG_VERBOSE, "; ");
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1483 total_packets, total_size);
/* same breakdown for the output side, counting muxed packets */
1486 for (i = 0; i < nb_output_files; i++) {
1487 OutputFile *of = output_files[i];
1488 uint64_t total_packets = 0, total_size = 0;
1490 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1491 i, of->ctx->filename);
1493 for (j = 0; j < of->ctx->nb_streams; j++) {
1494 OutputStream *ost = output_streams[of->ost_index + j];
1495 enum AVMediaType type = ost->enc_ctx->codec_type;
1497 total_size += ost->data_size;
1498 total_packets += ost->packets_written;
1500 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1501 i, j, media_type_string(type));
1502 if (ost->encoding_needed) {
1503 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1504 ost->frames_encoded);
1505 if (type == AVMEDIA_TYPE_AUDIO)
1506 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1507 av_log(NULL, AV_LOG_VERBOSE, "; ");
1510 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1511 ost->packets_written, ost->data_size);
1513 av_log(NULL, AV_LOG_VERBOSE, "\n");
1516 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1517 total_packets, total_size);
/* warn the user when the run produced no encoded data at all */
1519 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1520 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1522 av_log(NULL, AV_LOG_WARNING, "\n");
1524 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line progress report (frame/fps/q/size/time/bitrate)
 * to stderr and, when -progress is active, a machine-readable key=value
 * report to progress_avio. Rate-limited to one update per 500ms unless this
 * is the final report. Uses static state (last_time, qp_histogram), so it is
 * not reentrant. */
1529 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1532 AVBPrint buf_script;
1534 AVFormatContext *oc;
1536 AVCodecContext *enc;
1537 int frame_number, vid, i;
1539 int64_t pts = INT64_MIN;
1540 static int64_t last_time = -1;
1541 static int qp_histogram[52];
1542 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress output is wanted */
1544 if (!print_stats && !is_last_report && !progress_avio)
1547 if (!is_last_report) {
1548 if (last_time == -1) {
1549 last_time = cur_time;
/* throttle: skip intermediate reports issued less than 0.5s apart */
1552 if ((cur_time - last_time) < 500000)
1554 last_time = cur_time;
/* report the size of the first output file only */
1558 oc = output_files[0]->ctx;
1560 total_size = avio_size(oc->pb);
1561 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1562 total_size = avio_tell(oc->pb);
1566 av_bprint_init(&buf_script, 0, 1);
1567 for (i = 0; i < nb_output_streams; i++) {
1569 ost = output_streams[i];
1571 if (!ost->stream_copy)
1572 q = ost->quality / (float) FF_QP2LAMBDA;
/* a video stream after the first only contributes its quantizer value */
1574 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1576 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1577 ost->file_index, ost->index, q);
/* first video stream drives frame count / fps display */
1579 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1580 float fps, t = (cur_time-timer_start) / 1000000.0;
1582 frame_number = ost->frame_number;
1583 fps = t > 1 ? frame_number / t : 0;
1584 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1585 frame_number, fps < 9.95, fps, q);
1586 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1587 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1588 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1589 ost->file_index, ost->index, q);
1591 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-scaled histogram of quantizer values seen so far */
1595 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1597 for (j = 0; j < 32; j++)
1598 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
/* per-plane and aggregate PSNR when the encoder collects error stats */
1601 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1603 double error, error_sum = 0;
1604 double scale, scale_sum = 0;
1606 char type[3] = { 'Y','U','V' };
1607 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1608 for (j = 0; j < 3; j++) {
1609 if (is_last_report) {
1610 error = enc->error[j];
1611 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1613 error = ost->error[j];
1614 scale = enc->width * enc->height * 255.0 * 255.0;
1620 p = psnr(error / scale);
1621 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1622 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1623 ost->file_index, ost->index, type[j] | 32, p);
1625 p = psnr(error_sum / scale_sum);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1627 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1628 ost->file_index, ost->index, p);
1632 /* compute min output value */
1633 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1634 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1635 ost->st->time_base, AV_TIME_BASE_Q));
1637 nb_frames_drop += ost->last_droped;
/* split the output timestamp into h:m:s.us for display */
1640 secs = FFABS(pts) / AV_TIME_BASE;
1641 us = FFABS(pts) % AV_TIME_BASE;
1647 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1649 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1651 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1652 "size=%8.0fkB time=", total_size / 1024.0);
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1656 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1657 (100 * us) / AV_TIME_BASE);
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1661 av_bprintf(&buf_script, "bitrate=N/A\n");
1663 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1664 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1667 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1668 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1669 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1670 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1671 hours, mins, secs, us);
1673 if (nb_frames_dup || nb_frames_drop)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1675 nb_frames_dup, nb_frames_drop);
1676 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1677 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* console output: '\r' keeps rewriting one line until the final report */
1679 if (print_stats || is_last_report) {
1680 const char end = is_last_report ? '\n' : '\r';
1681 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1682 fprintf(stderr, "%s %c", buf, end);
1684 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1689 if (progress_avio) {
1690 av_bprintf(&buf_script, "progress=%s\n",
1691 is_last_report ? "end" : "continue");
1692 avio_write(progress_avio, buf_script.str,
1693 FFMIN(buf_script.len, buf_script.size - 1));
1694 avio_flush(progress_avio);
1695 av_bprint_finalize(&buf_script, NULL);
1696 if (is_last_report) {
1697 avio_closep(&progress_avio);
1702 print_final_stats(total_size);
/* Drain every still-active encoder at end of input: repeatedly call the
 * encoder with a NULL frame until it stops producing packets, writing each
 * delayed packet to its muxer. */
1705 static void flush_encoders(void)
1709 for (i = 0; i < nb_output_streams; i++) {
1710 OutputStream *ost = output_streams[i];
1711 AVCodecContext *enc = ost->enc_ctx;
1712 AVFormatContext *os = output_files[ost->file_index]->ctx;
1713 int stop_encoding = 0;
/* streamcopy outputs have no encoder to flush */
1715 if (!ost->encoding_needed)
/* PCM-style audio codecs (frame_size <= 1) have no delayed frames */
1718 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
/* rawvideo into a RAWPICTURE muxer bypasses the encoder entirely */
1720 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* select the type-specific flush entry point */
1724 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1727 switch (enc->codec_type) {
1728 case AVMEDIA_TYPE_AUDIO:
1729 encode = avcodec_encode_audio2;
1732 case AVMEDIA_TYPE_VIDEO:
1733 encode = avcodec_encode_video2;
1744 av_init_packet(&pkt);
/* NULL frame signals the encoder to emit its buffered output */
1748 update_benchmark(NULL);
1749 ret = encode(enc, &pkt, NULL, &got_packet);
1750 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1752 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats must keep flowing to the log file during the flush */
1757 if (ost->logfile && enc->stats_out) {
1758 fprintf(ost->logfile, "%s", enc->stats_out);
/* if the muxer is already closed, discard instead of writing */
1764 if (ost->finished & MUXER_FINISHED) {
1765 av_free_packet(&pkt);
1768 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* capture pkt.size before write_frame, which may consume the packet */
1769 pkt_size = pkt.size;
1770 write_frame(os, &pkt, ost);
1771 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1772 do_video_stats(ost, pkt_size);
1783 * Check whether a packet from ist should be written into ost at this time
1785 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1787 OutputFile *of = output_files[ost->file_index];
1788 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1790 if (ost->source_index != ist_index)
1796 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one demuxed packet straight to an output stream (-c copy path):
 * rescale its timestamps into the output time base, apply start-time
 * offsets and recording-time limits, and hand it to write_frame(). */
1802 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1804 OutputFile *of = output_files[ost->file_index];
1805 InputFile *f = input_files [ist->file_index];
1806 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1807 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1812 av_init_packet(&opkt);
/* by default the copy starts at the first keyframe */
1814 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1815 !ost->copy_initial_nonkeyframes)
/* drop packets before the output start time unless -copypriorss is set */
1818 if (pkt->pts == AV_NOPTS_VALUE) {
1819 if (!ost->frame_number && ist->pts < start_time &&
1820 !ost->copy_prior_start)
1823 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824 !ost->copy_prior_start)
/* stop this output once the per-output recording time is reached */
1828 if (of->recording_time != INT64_MAX &&
1829 ist->pts >= of->recording_time + start_time) {
1830 close_output_stream(ost);
/* -t on the input file is measured from the input's own start time */
1834 if (f->recording_time != INT64_MAX) {
1835 start_time = f->ctx->start_time;
1836 if (f->start_time != AV_NOPTS_VALUE)
1837 start_time += f->start_time;
1838 if (ist->pts >= f->recording_time + start_time) {
1839 close_output_stream(ost);
1844 /* force the input stream PTS */
1845 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1848 if (pkt->pts != AV_NOPTS_VALUE)
1849 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1851 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet has none */
1853 if (pkt->dts == AV_NOPTS_VALUE)
1854 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1856 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857 opkt.dts -= ost_tb_start_time;
/* audio: rescale via sample counts to avoid cumulative rounding drift */
1859 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1860 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1862 duration = ist->dec_ctx->frame_size;
1863 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865 ost->st->time_base) - ost_tb_start_time;
1868 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869 opkt.flags = pkt->flags;
1870 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1871 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1872 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1873 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1874 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1876 int ret = av_parser_change(ost->parser, ost->st->codec,
1877 &opkt.data, &opkt.size,
1878 pkt->data, pkt->size,
1879 pkt->flags & AV_PKT_FLAG_KEY);
1881 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data so the packet owns (and frees) it */
1886 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1891 opkt.data = pkt->data;
1892 opkt.size = pkt->size;
1894 av_copy_packet_side_data(&opkt, pkt);
/* RAWPICTURE muxers expect an AVPicture struct, not raw pixel data */
1896 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1897 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1898 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1899 /* store AVPicture in AVPacket, as expected by the output format */
1900 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1902 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1906 opkt.data = (uint8_t *)&pict;
1907 opkt.size = sizeof(AVPicture);
1908 opkt.flags |= AV_PKT_FLAG_KEY;
1911 write_frame(of->ctx, &opkt, ost);
1914 int guess_input_channel_layout(InputStream *ist)
1916 AVCodecContext *dec = ist->dec_ctx;
1918 if (!dec->channel_layout) {
1919 char layout_name[256];
1921 if (dec->channels > ist->guess_layout_max)
1923 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1924 if (!dec->channel_layout)
1926 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927 dec->channels, dec->channel_layout);
1928 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1929 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet, derive a usable pts for the decoded frame, and
 * feed the frame into every filtergraph input attached to this stream.
 * Returns the decoder's return value, or a negative error from the filter
 * injection. *got_output is set when a frame was produced. */
1934 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1936 AVFrame *decoded_frame, *f;
1937 AVCodecContext *avctx = ist->dec_ctx;
1938 int i, ret, err = 0, resample_changed;
1939 AVRational decoded_frame_tb;
/* per-stream frames are allocated lazily and reused across calls */
1941 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1942 return AVERROR(ENOMEM);
1943 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1944 return AVERROR(ENOMEM);
1945 decoded_frame = ist->decoded_frame;
1947 update_benchmark(NULL);
1948 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1949 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1951 if (ret >= 0 && avctx->sample_rate <= 0) {
1952 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1953 ret = AVERROR_INVALIDDATA;
/* track decode success/failure counts for the -xerror statistics */
1956 if (*got_output || ret<0)
1957 decode_error_stat[ret<0] ++;
1959 if (ret < 0 && exit_on_error)
1962 if (!*got_output || ret < 0)
1965 ist->samples_decoded += decoded_frame->nb_samples;
1966 ist->frames_decoded++;
1969 /* increment next_dts to use for the case where the input stream does not
1970 have timestamps or there are multiple frames in the packet */
1971 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1973 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream format changes that require filtergraph reinit */
1977 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1978 ist->resample_channels != avctx->channels ||
1979 ist->resample_channel_layout != decoded_frame->channel_layout ||
1980 ist->resample_sample_rate != decoded_frame->sample_rate;
1981 if (resample_changed) {
1982 char layout1[64], layout2[64];
1984 if (!guess_input_channel_layout(ist)) {
1985 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986 "layout for Input Stream #%d.%d\n", ist->file_index,
1990 decoded_frame->channel_layout = avctx->channel_layout;
1992 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993 ist->resample_channel_layout);
1994 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995 decoded_frame->channel_layout);
1997 av_log(NULL, AV_LOG_INFO,
1998 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999 ist->file_index, ist->st->index,
2000 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2001 ist->resample_channels, layout1,
2002 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003 avctx->channels, layout2);
/* remember the new parameters and reconfigure affected filtergraphs */
2005 ist->resample_sample_fmt = decoded_frame->format;
2006 ist->resample_sample_rate = decoded_frame->sample_rate;
2007 ist->resample_channel_layout = decoded_frame->channel_layout;
2008 ist->resample_channels = avctx->channels;
2010 for (i = 0; i < nb_filtergraphs; i++)
2011 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012 FilterGraph *fg = filtergraphs[i];
2013 if (configure_filtergraph(fg) < 0) {
2014 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2020 /* if the decoder provides a pts, use it instead of the last packet pts.
2021 the decoder could be delaying output by a packet or more. */
2022 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024 decoded_frame_tb = avctx->time_base;
2025 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = decoded_frame->pkt_pts;
2027 decoded_frame_tb = ist->st->time_base;
2028 } else if (pkt->pts != AV_NOPTS_VALUE) {
2029 decoded_frame->pts = pkt->pts;
2030 decoded_frame_tb = ist->st->time_base;
2032 decoded_frame->pts = ist->dts;
2033 decoded_frame_tb = AV_TIME_BASE_Q;
/* the packet pts is consumed by the first frame it produces */
2035 pkt->pts = AV_NOPTS_VALUE;
2036 if (decoded_frame->pts != AV_NOPTS_VALUE)
2037 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039 (AVRational){1, avctx->sample_rate});
/* all but the last filter input receive a reference; the last consumes
 * the decoded frame itself */
2040 for (i = 0; i < ist->nb_filters; i++) {
2041 if (i < ist->nb_filters - 1) {
2042 f = ist->filter_frame;
2043 err = av_frame_ref(f, decoded_frame);
2048 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049 AV_BUFFERSRC_FLAG_PUSH);
2050 if (err == AVERROR_EOF)
2051 err = 0; /* ignore */
2055 decoded_frame->pts = AV_NOPTS_VALUE;
2057 av_frame_unref(ist->filter_frame);
2058 av_frame_unref(decoded_frame);
2059 return err < 0 ? err : ret;
/* Decode one video packet, pick the best-effort timestamp for the frame,
 * handle mid-stream resolution/pixel-format changes, and push the frame
 * into every attached filtergraph input. *got_output is set when a frame
 * was produced. */
2062 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2064 AVFrame *decoded_frame, *f;
2065 int i, ret = 0, err = 0, resample_changed;
2066 int64_t best_effort_timestamp;
2067 AVRational *frame_sample_aspect;
2069 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072 return AVERROR(ENOMEM);
2073 decoded_frame = ist->decoded_frame;
/* feed the tracked dts to the decoder so it can fill pkt_dts */
2074 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2076 update_benchmark(NULL);
2077 ret = avcodec_decode_video2(ist->dec_ctx,
2078 decoded_frame, got_output, pkt);
2079 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2081 // The following line may be required in some cases where there is no parser
2082 // or the parser does not has_b_frames correctly
2083 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2087 av_log(ist->dec_ctx, AV_LOG_WARNING,
2088 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089 "If you want to help, upload a sample "
2090 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092 ist->dec_ctx->has_b_frames,
2093 ist->st->codec->has_b_frames);
2096 if (*got_output || ret<0)
2097 decode_error_stat[ret<0] ++;
2099 if (ret < 0 && exit_on_error)
/* debug aid: report decoder/context parameter mismatches */
2102 if (*got_output && ret >= 0) {
2103 if (ist->dec_ctx->width != decoded_frame->width ||
2104 ist->dec_ctx->height != decoded_frame->height ||
2105 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2106 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2107 decoded_frame->width,
2108 decoded_frame->height,
2109 decoded_frame->format,
2110 ist->dec_ctx->width,
2111 ist->dec_ctx->height,
2112 ist->dec_ctx->pix_fmt);
2116 if (!*got_output || ret < 0)
/* honour an explicit -top field order override */
2119 if(ist->top_field_first>=0)
2120 decoded_frame->top_field_first = ist->top_field_first;
2122 ist->frames_decoded++;
/* download the frame from hardware surfaces when a hwaccel is active */
2124 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2125 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2129 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2131 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2132 if(best_effort_timestamp != AV_NOPTS_VALUE)
2133 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2136 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138 ist->st->index, av_ts2str(decoded_frame->pts),
2139 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140 best_effort_timestamp,
2141 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142 decoded_frame->key_frame, decoded_frame->pict_type,
2143 ist->st->time_base.num, ist->st->time_base.den);
2148 if (ist->st->sample_aspect_ratio.num)
2149 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/format changes that require filtergraph reinit */
2151 resample_changed = ist->resample_width != decoded_frame->width ||
2152 ist->resample_height != decoded_frame->height ||
2153 ist->resample_pix_fmt != decoded_frame->format;
2154 if (resample_changed) {
2155 av_log(NULL, AV_LOG_INFO,
2156 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157 ist->file_index, ist->st->index,
2158 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2159 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2161 ist->resample_width = decoded_frame->width;
2162 ist->resample_height = decoded_frame->height;
2163 ist->resample_pix_fmt = decoded_frame->format;
2165 for (i = 0; i < nb_filtergraphs; i++) {
2166 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167 configure_filtergraph(filtergraphs[i]) < 0) {
2168 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2174 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* all but the last filter input receive a reference; the last consumes
 * the decoded frame itself */
2175 for (i = 0; i < ist->nb_filters; i++) {
2176 if (!frame_sample_aspect->num)
2177 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2179 if (i < ist->nb_filters - 1) {
2180 f = ist->filter_frame;
2181 err = av_frame_ref(f, decoded_frame);
2186 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187 if (ret == AVERROR_EOF) {
2188 ret = 0; /* ignore */
2189 } else if (ret < 0) {
2190 av_log(NULL, AV_LOG_FATAL,
2191 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2197 av_frame_unref(ist->filter_frame);
2198 av_frame_unref(decoded_frame);
2199 return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every subtitle encoder
 * mapped from this stream, plus the sub2video machinery. With
 * -fix_sub_duration, output is delayed by one subtitle so the previous
 * one's display time can be clipped against the current pts. */
2202 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2204 AVSubtitle subtitle;
2205 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206 &subtitle, got_output, pkt);
2208 if (*got_output || ret<0)
2209 decode_error_stat[ret<0] ++;
2211 if (ret < 0 && exit_on_error)
2214 if (ret < 0 || !*got_output) {
/* a NULL packet is a flush request: drain sub2video state */
2216 sub2video_flush(ist);
2220 if (ist->fix_sub_duration) {
2222 if (ist->prev_sub.got_output) {
/* end_display_time is in milliseconds, hence the rescale by 1000 */
2223 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2224 1000, AV_TIME_BASE);
2225 if (end < ist->prev_sub.subtitle.end_display_time) {
2226 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2227 "Subtitle duration reduced from %d to %d%s\n",
2228 ist->prev_sub.subtitle.end_display_time, end,
2229 end <= 0 ? ", dropping it" : "");
2230 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous: the previous subtitle is emitted now */
2233 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2234 FFSWAP(int, ret, ist->prev_sub.ret);
2235 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2243 sub2video_update(ist, &subtitle);
2245 if (!subtitle.num_rects)
2248 ist->frames_decoded++;
2250 for (i = 0; i < nb_output_streams; i++) {
2251 OutputStream *ost = output_streams[i];
2253 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2254 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2257 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2261 avsubtitle_free(&subtitle);
2265 static int send_filter_eof(InputStream *ist)
2268 for (i = 0; i < ist->nb_filters; i++) {
2269 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2276 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's dts/pts
 * clocks, runs the appropriate decoder when transcoding, and forwards the
 * packet to do_streamcopy() for copy outputs. */
2277 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2283 if (!ist->saw_first_ts) {
/* seed the clock so B-frame reordering delay is accounted for */
2284 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2286 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2287 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2288 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2290 ist->saw_first_ts = 1;
2293 if (ist->next_dts == AV_NOPTS_VALUE)
2294 ist->next_dts = ist->dts;
2295 if (ist->next_pts == AV_NOPTS_VALUE)
2296 ist->next_pts = ist->pts;
2300 av_init_packet(&avpkt);
/* resync the tracked clocks to the packet's own dts when present */
2308 if (pkt->dts != AV_NOPTS_VALUE) {
2309 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2310 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2311 ist->next_pts = ist->pts = ist->dts;
2314 // while we have more to decode or while the decoder did output something on EOF
2315 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2319 ist->pts = ist->next_pts;
2320 ist->dts = ist->next_dts;
/* a decoder consuming less than the whole packet implies sub-frames */
2322 if (avpkt.size && avpkt.size != pkt->size &&
2323 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2324 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2325 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2326 ist->showed_multi_packet_warning = 1;
2329 switch (ist->dec_ctx->codec_type) {
2330 case AVMEDIA_TYPE_AUDIO:
2331 ret = decode_audio (ist, &avpkt, &got_output);
2333 case AVMEDIA_TYPE_VIDEO:
2334 ret = decode_video (ist, &avpkt, &got_output);
/* estimate the frame duration: packet duration first, then framerate */
2335 if (avpkt.duration) {
2336 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2337 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2338 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2339 duration = ((int64_t)AV_TIME_BASE *
2340 ist->dec_ctx->framerate.den * ticks) /
2341 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2345 if(ist->dts != AV_NOPTS_VALUE && duration) {
2346 ist->next_dts += duration;
2348 ist->next_dts = AV_NOPTS_VALUE;
2351 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2353 case AVMEDIA_TYPE_SUBTITLE:
2354 ret = transcode_subtitles(ist, &avpkt, &got_output);
2361 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2362 ist->file_index, ist->st->index, av_err2str(ret));
/* the packet pts only applies to the first decode iteration */
2369 avpkt.pts= AV_NOPTS_VALUE;
2371 // touch data and size only if not EOF
2373 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2381 if (got_output && !pkt)
2385 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2386 if (!pkt && ist->decoding_needed && !got_output) {
2387 int ret = send_filter_eof(ist);
2389 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2394 /* handle stream copy */
2395 if (!ist->decoding_needed) {
2396 ist->dts = ist->next_dts;
2397 switch (ist->dec_ctx->codec_type) {
2398 case AVMEDIA_TYPE_AUDIO:
2399 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2400 ist->dec_ctx->sample_rate;
2402 case AVMEDIA_TYPE_VIDEO:
2403 if (ist->framerate.num) {
2404 // TODO: Remove work-around for c99-to-c89 issue 7
2405 AVRational time_base_q = AV_TIME_BASE_Q;
2406 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2407 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2408 } else if (pkt->duration) {
2409 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2410 } else if(ist->dec_ctx->framerate.num != 0) {
2411 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2412 ist->next_dts += ((int64_t)AV_TIME_BASE *
2413 ist->dec_ctx->framerate.den * ticks) /
2414 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2418 ist->pts = ist->dts;
2419 ist->next_pts = ist->next_dts;
/* fan the packet out to every copy output mapped from this stream */
2421 for (i = 0; pkt && i < nb_output_streams; i++) {
2422 OutputStream *ost = output_streams[i];
2424 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2427 do_streamcopy(ist, ost, pkt);
/*
 * Print the SDP description for the RTP outputs.
 * Collects the AVFormatContext of every output file whose muxer is "rtp",
 * generates one SDP blob covering all of them, then either prints it to
 * stdout or, when -sdp_file was given (sdp_filename set), writes it to
 * that file via an AVIOContext.
 */
2433 static void print_sdp(void)
2438 AVIOContext *sdp_pb;
2439 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* gather only the RTP outputs; j counts how many were found */
2443 for (i = 0, j = 0; i < nb_output_files; i++) {
2444 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2445 avc[j] = output_files[i]->ctx;
2450 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* no -sdp_file: dump the SDP to stdout */
2452 if (!sdp_filename) {
2453 printf("SDP:\n%s\n", sdp);
2456 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2457 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2459 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2460 avio_closep(&sdp_pb);
/* sdp_filename is consumed: the SDP is written at most once */
2461 av_freep(&sdp_filename);
2468 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2471 for (i = 0; hwaccels[i].name; i++)
2472 if (hwaccels[i].pix_fmt == pix_fmt)
2473 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: choose the decoder output pixel
 * format from the candidate list, initializing a hardware acceleration
 * (hwaccel) when a hardware pixel format is offered and matches what the
 * user requested.
 */
2477 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2479 InputStream *ist = s->opaque;
2480 const enum AVPixelFormat *p;
/* walk the decoder's candidate list (terminated by -1 / AV_PIX_FMT_NONE) */
2483 for (p = pix_fmts; *p != -1; p++) {
2484 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2485 const HWAccel *hwaccel;
/* software formats need no hwaccel setup */
2487 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* skip hwaccels that clash with an already-active one, or that the user
 * did not ask for (unless -hwaccel auto was used) */
2490 hwaccel = get_hwaccel(*p);
2492 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2493 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2496 ret = hwaccel->init(s);
/* an explicitly requested hwaccel that fails to initialize is fatal */
2498 if (ist->hwaccel_id == hwaccel->id) {
2499 av_log(NULL, AV_LOG_FATAL,
2500 "%s hwaccel requested for input stream #%d:%d, "
2501 "but cannot be initialized.\n", hwaccel->name,
2502 ist->file_index, ist->st->index);
2503 return AV_PIX_FMT_NONE;
/* record which hwaccel / pixel format is now active on this stream */
2507 ist->active_hwaccel_id = hwaccel->id;
2508 ist->hwaccel_pix_fmt = *p;
2515 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2517 InputStream *ist = s->opaque;
2519 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2520 return ist->hwaccel_get_buffer(s, frame, flags);
2522 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream ist_index (when decoding is needed)
 * and reset its timestamp bookkeeping.  On failure a human-readable
 * message is written into error/error_len and a negative AVERROR code
 * is returned.
 */
2525 static int init_input_stream(int ist_index, char *error, int error_len)
2528 InputStream *ist = input_streams[ist_index];
2530 if (ist->decoding_needed) {
2531 AVCodec *codec = ist->dec;
/* no decoder available for this stream's codec */
2533 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2534 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2535 return AVERROR(EINVAL);
/* install per-stream callbacks for format negotiation and buffer allocation */
2538 ist->dec_ctx->opaque = ist;
2539 ist->dec_ctx->get_format = get_format;
2540 ist->dec_ctx->get_buffer2 = get_buffer;
2541 ist->dec_ctx->thread_safe_callbacks = 1;
2543 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles decoded for output need end-display-time computation */
2544 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2545 (ist->decoding_needed & DECODING_FOR_OST)) {
2546 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2547 if (ist->decoding_needed & DECODING_FOR_FILTER)
2548 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* default to automatic thread count unless the user chose one */
2551 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2552 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2553 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2554 if (ret == AVERROR_EXPERIMENTAL)
2555 abort_codec_experimental(codec, 0);
2557 snprintf(error, error_len,
2558 "Error while opening decoder for input stream "
2560 ist->file_index, ist->st->index, av_err2str(ret));
/* complain about decoder options that were not consumed by the codec */
2563 assert_avoptions(ist->decoder_opts);
/* timestamps are unknown until the first packet is seen */
2566 ist->next_pts = AV_NOPTS_VALUE;
2567 ist->next_dts = AV_NOPTS_VALUE;
2572 static InputStream *get_input_stream(OutputStream *ost)
2574 if (ost->source_index >= 0)
2575 return input_streams[ost->source_index];
/* qsort() comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    return lhs > rhs ? 1 : 0;
}
/*
 * Open the encoder for an output stream (when encoding is needed) and
 * propagate the resulting parameters into the stream's muxer-visible
 * codec context.  On failure a human-readable message is written into
 * error/error_len and a negative AVERROR code is returned.
 */
2585 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2589 if (ost->encoding_needed) {
2590 AVCodec *codec = ost->enc;
2591 AVCodecContext *dec = NULL;
2594 if ((ist = get_input_stream(ost)))
/* forward the subtitle header from the source decoder, if any */
2596 if (dec && dec->subtitle_header) {
2597 /* ASS code assumes this buffer is null terminated so add extra byte. */
2598 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2599 if (!ost->enc_ctx->subtitle_header)
2600 return AVERROR(ENOMEM);
2601 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2602 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* default thread count, and 128 kb/s audio bitrate when the user gave none */
2604 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2605 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2606 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2607 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2609 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2610 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2611 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2613 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2614 if (ret == AVERROR_EXPERIMENTAL)
2615 abort_codec_experimental(codec, 1);
2616 snprintf(error, error_len,
2617 "Error while opening encoder for output stream #%d:%d - "
2618 "maybe incorrect parameters such as bit_rate, rate, width or height",
2619 ost->file_index, ost->index);
/* fixed-frame-size audio encoders dictate the buffersink frame size */
2622 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2623 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2624 av_buffersink_set_frame_size(ost->filter->filter,
2625 ost->enc_ctx->frame_size);
2626 assert_avoptions(ost->encoder_opts);
/* common user mistake: bitrate given in kbit/s instead of bit/s */
2627 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2628 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2629 " It takes bits/s as argument, not kbits/s\n");
/* mirror the encoder context into the stream's muxer-visible context */
2631 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2633 av_log(NULL, AV_LOG_FATAL,
2634 "Error initializing the output stream codec context.\n");
2638 // copy timebase while removing common factors
2639 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2640 ost->st->codec->codec= ost->enc_ctx->codec;
/* NOTE(review): this path appears to be the non-encoding (stream copy)
 * branch — only user options and the time base are applied; confirm
 * against the elided surrounding control flow */
2642 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2644 av_log(NULL, AV_LOG_FATAL,
2645 "Error setting up codec context options.\n");
2648 // copy timebase while removing common factors
2649 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * Parse the -force_key_frames specification string kf (comma-separated
 * times, plus a "chapters[+offset]" shorthand) into a sorted array of
 * timestamps expressed in avctx->time_base units, stored in
 * ost->forced_kf_pts / ost->forced_kf_count.  Allocation or parse
 * failures are fatal (logged at AV_LOG_FATAL).
 */
2655 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2656 AVCodecContext *avctx)
2659 int n = 1, i, size, index = 0;
/* count the comma-separated entries to size the array */
2662 for (p = kf; *p; p++)
2666 pts = av_malloc_array(size, sizeof(*pts));
2668 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2673 for (i = 0; i < n; i++) {
2674 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start, shifted
 * by an optional offset */
2679 if (!memcmp(p, "chapters", 8)) {
2681 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array to one entry per chapter (guard against int overflow) */
2684 if (avf->nb_chapters > INT_MAX - size ||
2685 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2687 av_log(NULL, AV_LOG_FATAL,
2688 "Could not allocate forced key frames array.\n");
2691 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2692 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2694 for (j = 0; j < avf->nb_chapters; j++) {
2695 AVChapter *c = avf->chapters[j];
2696 av_assert1(index < size);
2697 pts[index++] = av_rescale_q(c->start, c->time_base,
2698 avctx->time_base) + t;
/* plain timestamp entry */
2703 t = parse_time_or_die("force_key_frames", p, 1);
2704 av_assert1(index < size);
2705 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* the encoder consumes the forced-keyframe list in ascending order */
2712 av_assert0(index == size);
2713 qsort(pts, size, sizeof(*pts), compare_int64);
2714 ost->forced_kf_count = size;
2715 ost->forced_kf_pts = pts;
/*
 * Warn, once per stream index, when a packet references an input stream
 * that appeared after the initial stream setup.
 */
2718 static void report_new_stream(int input_index, AVPacket *pkt)
2720 InputFile *file = input_files[input_index];
2721 AVStream *st = file->ctx->streams[pkt->stream_index];
/* this (or a later) stream index has already been reported */
2723 if (pkt->stream_index < file->nb_streams_warn)
2725 av_log(file->ctx, AV_LOG_WARNING,
2726 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2727 av_get_media_type_string(st->codec->codec_type),
2728 input_index, pkt->stream_index,
2729 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* raise the threshold so each newly seen stream is reported only once */
2730 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag of an output stream to the libavcodec
 * ident plus the encoder name ("Lavc ..." in bitexact mode), unless the
 * user already supplied an encoder tag.
 */
2733 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2735 AVDictionaryEntry *e;
2737 uint8_t *encoder_string;
2738 int encoder_string_len;
2739 int format_flags = 0;
2740 int codec_flags = 0;
/* respect an encoder tag the user set explicitly */
2742 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate the muxer's fflags option string to detect +bitexact */
2745 e = av_dict_get(of->opts, "fflags", NULL, 0);
2747 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2750 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* likewise for the encoder's flags option string */
2752 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2754 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2757 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2760 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2761 encoder_string = av_mallocz(encoder_string_len);
2762 if (!encoder_string)
/* full version ident only when neither muxer nor encoder is bitexact */
2765 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2766 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2768 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2769 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* the dictionary takes ownership of encoder_string (DONT_STRDUP_VAL) */
2770 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2771 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time global setup before the main transcode loop:
 *  - bind complex-filtergraph outputs that lack a source to an input
 *    stream index,
 *  - record start times for frame-rate emulation (-re),
 *  - derive encoder or stream-copy parameters for every output stream,
 *  - open all encoders and decoders,
 *  - mark unused programs as discarded, write the output file headers,
 *  - and log the resulting stream mapping.
 * Returns 0 on success, a negative AVERROR code on failure.
 */
2774 static int transcode_init(void)
2776 int ret = 0, i, j, k;
2777 AVFormatContext *oc;
2780 char error[1024] = {0};
/* give filtergraph-fed outputs a source_index pointing at the graph's
 * single input stream, so later code can look up the source */
2783 for (i = 0; i < nb_filtergraphs; i++) {
2784 FilterGraph *fg = filtergraphs[i];
2785 for (j = 0; j < fg->nb_outputs; j++) {
2786 OutputFilter *ofilter = fg->outputs[j];
2787 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2789 if (fg->nb_inputs != 1)
2791 for (k = nb_input_streams-1; k >= 0 ; k--)
2792 if (fg->inputs[0]->ist == input_streams[k])
2794 ofilter->ost->source_index = k;
2798 /* init framerate emulation */
2799 for (i = 0; i < nb_input_files; i++) {
2800 InputFile *ifile = input_files[i];
2801 if (ifile->rate_emu)
2802 for (j = 0; j < ifile->nb_streams; j++)
2803 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2806 /* for each output stream, we compute the right encoding parameters */
2807 for (i = 0; i < nb_output_streams; i++) {
2808 AVCodecContext *enc_ctx;
2809 AVCodecContext *dec_ctx = NULL;
2810 ost = output_streams[i];
2811 oc = output_files[ost->file_index]->ctx;
2812 ist = get_input_stream(ost);
2814 if (ost->attachment_filename)
/* stream copy writes directly into the muxer's codec context,
 * encoding goes through the separate encoder context */
2817 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2820 dec_ctx = ist->dec_ctx;
2822 ost->st->disposition = ist->st->disposition;
2823 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2824 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* mark this stream as default when it is the only one of its media
 * type in the output file */
2826 for (j=0; j<oc->nb_streams; j++) {
2827 AVStream *st = oc->streams[j];
2828 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2831 if (j == oc->nb_streams)
2832 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2833 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2836 if (ost->stream_copy) {
2838 uint64_t extra_size;
2840 av_assert0(ist && !ost->filter);
2842 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2844 if (extra_size > INT_MAX) {
2845 return AVERROR(EINVAL);
2848 /* if stream_copy is selected, no need to decode or encode */
2849 enc_ctx->codec_id = dec_ctx->codec_id;
2850 enc_ctx->codec_type = dec_ctx->codec_type;
/* keep the input codec tag only when the output container can
 * represent it (or has no tag table at all) */
2852 if (!enc_ctx->codec_tag) {
2853 unsigned int codec_tag;
2854 if (!oc->oformat->codec_tag ||
2855 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2856 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2857 enc_ctx->codec_tag = dec_ctx->codec_tag;
2860 enc_ctx->bit_rate = dec_ctx->bit_rate;
2861 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2862 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2863 enc_ctx->field_order = dec_ctx->field_order;
/* duplicate the codec extradata with libav-required padding */
2864 if (dec_ctx->extradata_size) {
2865 enc_ctx->extradata = av_mallocz(extra_size);
2866 if (!enc_ctx->extradata) {
2867 return AVERROR(ENOMEM);
2869 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2871 enc_ctx->extradata_size= dec_ctx->extradata_size;
2872 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2874 enc_ctx->time_base = ist->st->time_base;
2876 * Avi is a special case here because it supports variable fps but
2877 * having the fps and timebase differe significantly adds quite some
2880 if(!strcmp(oc->oformat->name, "avi")) {
2881 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2882 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2883 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2884 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2886 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2887 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2888 enc_ctx->ticks_per_frame = 2;
2889 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2890 && av_q2d(ist->st->time_base) < 1.0/500
2892 enc_ctx->time_base = dec_ctx->time_base;
2893 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2894 enc_ctx->time_base.den *= 2;
2895 enc_ctx->ticks_per_frame = 2;
/* non-variable-fps, non-mov-family containers: prefer the codec
 * time base when the stream time base looks implausibly fine */
2897 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2898 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2899 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2900 && strcmp(oc->oformat->name, "f4v")
2902 if( copy_tb<0 && dec_ctx->time_base.den
2903 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2904 && av_q2d(ist->st->time_base) < 1.0/500
2906 enc_ctx->time_base = dec_ctx->time_base;
2907 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode ("tmcd") tracks keep a plausible codec time base */
2910 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2911 && dec_ctx->time_base.num < dec_ctx->time_base.den
2912 && dec_ctx->time_base.num > 0
2913 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2914 enc_ctx->time_base = dec_ctx->time_base;
2917 if (ist && !ost->frame_rate.num)
2918 ost->frame_rate = ist->framerate;
2919 if(ost->frame_rate.num)
2920 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2922 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2923 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* deep-copy the input stream's packet side data (except a display
 * matrix that the user overrode with -metadata rotate / autorotate) */
2925 if (ist->st->nb_side_data) {
2926 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2927 sizeof(*ist->st->side_data));
2928 if (!ost->st->side_data)
2929 return AVERROR(ENOMEM);
2931 ost->st->nb_side_data = 0;
2932 for (j = 0; j < ist->st->nb_side_data; j++) {
2933 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2934 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2936 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2939 sd_dst->data = av_malloc(sd_src->size);
2941 return AVERROR(ENOMEM);
2942 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2943 sd_dst->size = sd_src->size;
2944 sd_dst->type = sd_src->type;
2945 ost->st->nb_side_data++;
2949 ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copy for stream copy */
2951 switch (enc_ctx->codec_type) {
2952 case AVMEDIA_TYPE_AUDIO:
2953 if (audio_volume != 256) {
2954 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2957 enc_ctx->channel_layout = dec_ctx->channel_layout;
2958 enc_ctx->sample_rate = dec_ctx->sample_rate;
2959 enc_ctx->channels = dec_ctx->channels;
2960 enc_ctx->frame_size = dec_ctx->frame_size;
2961 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2962 enc_ctx->block_align = dec_ctx->block_align;
2963 enc_ctx->initial_padding = dec_ctx->delay;
2964 #if FF_API_AUDIOENC_DELAY
2965 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC-3: drop bogus block_align values that would confuse muxers */
2967 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2968 enc_ctx->block_align= 0;
2969 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2970 enc_ctx->block_align= 0;
2972 case AVMEDIA_TYPE_VIDEO:
2973 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2974 enc_ctx->width = dec_ctx->width;
2975 enc_ctx->height = dec_ctx->height;
2976 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2977 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2979 av_mul_q(ost->frame_aspect_ratio,
2980 (AVRational){ enc_ctx->height, enc_ctx->width });
2981 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2982 "with stream copy may produce invalid files\n");
2984 else if (ist->st->sample_aspect_ratio.num)
2985 sar = ist->st->sample_aspect_ratio;
2987 sar = dec_ctx->sample_aspect_ratio;
2988 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2989 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2990 ost->st->r_frame_rate = ist->st->r_frame_rate;
2992 case AVMEDIA_TYPE_SUBTITLE:
2993 enc_ctx->width = dec_ctx->width;
2994 enc_ctx->height = dec_ctx->height;
2996 case AVMEDIA_TYPE_UNKNOWN:
2997 case AVMEDIA_TYPE_DATA:
2998 case AVMEDIA_TYPE_ATTACHMENT:
/* encoding path: find the encoder and build a simple filtergraph
 * when the stream is not already fed by one */
3005 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3007 /* should only happen when a default codec is not present. */
3008 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3009 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3010 ret = AVERROR(EINVAL);
3014 set_encoder_id(output_files[ost->file_index], ost);
3017 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3018 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3020 fg = init_simple_filtergraph(ist, ost);
3021 if (configure_filtergraph(fg)) {
3022 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* choose an output frame rate: filter graph, then -r / input
 * framerate, then r_frame_rate, finally a 25 fps fallback */
3027 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3028 if (!ost->frame_rate.num)
3029 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3030 if (ist && !ost->frame_rate.num)
3031 ost->frame_rate = ist->framerate;
3032 if (ist && !ost->frame_rate.num)
3033 ost->frame_rate = ist->st->r_frame_rate;
3034 if (ist && !ost->frame_rate.num) {
3035 ost->frame_rate = (AVRational){25, 1};
3036 av_log(NULL, AV_LOG_WARNING,
3038 "about the input framerate is available. Falling "
3039 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3040 "if you want a different framerate.\n",
3041 ost->file_index, ost->index);
3043 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* snap to the nearest frame rate the encoder supports (unless -force_fps) */
3044 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3045 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3046 ost->frame_rate = ost->enc->supported_framerates[idx];
3048 // reduce frame rate for mpeg4 to be within the spec limits
3049 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3050 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3051 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* per-media-type encoder parameter setup from the filter output */
3055 switch (enc_ctx->codec_type) {
3056 case AVMEDIA_TYPE_AUDIO:
3057 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3058 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3059 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3060 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3061 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3063 case AVMEDIA_TYPE_VIDEO:
3064 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3065 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3066 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3067 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3068 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3069 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3070 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* rescale already-parsed forced keyframe times to the encoder time base */
3072 for (j = 0; j < ost->forced_kf_count; j++)
3073 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3075 enc_ctx->time_base);
3077 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3078 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3079 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3080 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3081 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3082 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* warn when a non-yuv420p format was auto-picked for common encoders */
3083 if (!strncmp(ost->enc->name, "libx264", 7) &&
3084 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3085 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3086 av_log(NULL, AV_LOG_WARNING,
3087 "No pixel format specified, %s for H.264 encoding chosen.\n"
3088 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3089 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3090 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3091 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3092 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3093 av_log(NULL, AV_LOG_WARNING,
3094 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3095 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3096 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3097 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3099 ost->st->avg_frame_rate = ost->frame_rate;
/* keep the decoder's raw-sample depth only when the frame geometry
 * and pixel format are unchanged */
3102 enc_ctx->width != dec_ctx->width ||
3103 enc_ctx->height != dec_ctx->height ||
3104 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3105 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either a runtime expression ("expr:"),
 * "source" (keep source keyframes), or a static time list */
3108 if (ost->forced_keyframes) {
3109 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3110 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3111 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3113 av_log(NULL, AV_LOG_ERROR,
3114 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3117 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3118 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3119 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3120 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3122 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3123 // parse it only for static kf timings
3124 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3125 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3129 case AVMEDIA_TYPE_SUBTITLE:
3130 enc_ctx->time_base = (AVRational){1, 1000};
3131 if (!enc_ctx->width) {
3132 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3133 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3136 case AVMEDIA_TYPE_DATA:
/* parse the -disposition option value into stream disposition flags */
3144 if (ost->disposition) {
3145 static const AVOption opts[] = {
3146 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3147 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3148 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3149 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3150 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3151 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3152 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3153 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3154 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3155 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3156 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3157 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3158 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3159 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3162 static const AVClass class = {
3164 .item_name = av_default_item_name,
3166 .version = LIBAVUTIL_VERSION_INT,
3168 const AVClass *pclass = &class;
3170 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3176 /* open each encoder */
3177 for (i = 0; i < nb_output_streams; i++) {
3178 ret = init_output_stream(output_streams[i], error, sizeof(error));
3183 /* init input streams */
3184 for (i = 0; i < nb_input_streams; i++)
3185 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on decoder-open failure, close the already-opened encoders */
3186 for (i = 0; i < nb_output_streams; i++) {
3187 ost = output_streams[i];
3188 avcodec_close(ost->enc_ctx);
3193 /* discard unused programs */
3194 for (i = 0; i < nb_input_files; i++) {
3195 InputFile *ifile = input_files[i];
3196 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3197 AVProgram *p = ifile->ctx->programs[j];
3198 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is used */
3200 for (k = 0; k < p->nb_stream_indexes; k++)
3201 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3202 discard = AVDISCARD_DEFAULT;
3205 p->discard = discard;
3209 /* open files and write file headers */
3210 for (i = 0; i < nb_output_files; i++) {
3211 oc = output_files[i]->ctx;
3212 oc->interrupt_callback = int_cb;
3213 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3214 snprintf(error, sizeof(error),
3215 "Could not write header for output file #%d "
3216 "(incorrect codec parameters ?): %s",
3217 i, av_err2str(ret));
3218 ret = AVERROR(EINVAL);
3221 // assert_avoptions(output_files[i]->opts);
3222 if (strcmp(oc->oformat->name, "rtp")) {
3228 /* dump the file output parameters - cannot be done before in case
3230 for (i = 0; i < nb_output_files; i++) {
3231 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3234 /* dump the stream mapping */
3235 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3236 for (i = 0; i < nb_input_streams; i++) {
3237 ist = input_streams[i];
3239 for (j = 0; j < ist->nb_filters; j++) {
3240 if (ist->filters[j]->graph->graph_desc) {
3241 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3242 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3243 ist->filters[j]->name);
3244 if (nb_filtergraphs > 1)
3245 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3246 av_log(NULL, AV_LOG_INFO, "\n");
3251 for (i = 0; i < nb_output_streams; i++) {
3252 ost = output_streams[i];
3254 if (ost->attachment_filename) {
3255 /* an attached file */
3256 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3257 ost->attachment_filename, ost->file_index, ost->index);
3261 if (ost->filter && ost->filter->graph->graph_desc) {
3262 /* output from a complex graph */
3263 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3264 if (nb_filtergraphs > 1)
3265 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3267 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3268 ost->index, ost->enc ? ost->enc->name : "?");
3272 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3273 input_streams[ost->source_index]->file_index,
3274 input_streams[ost->source_index]->st->index,
3277 if (ost->sync_ist != input_streams[ost->source_index])
3278 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3279 ost->sync_ist->file_index,
3280 ost->sync_ist->st->index);
3281 if (ost->stream_copy)
3282 av_log(NULL, AV_LOG_INFO, " (copy)");
/* resolve decoder/encoder display names ("native" when the codec
 * descriptor name matches the implementation name) */
3284 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3285 const AVCodec *out_codec = ost->enc;
3286 const char *decoder_name = "?";
3287 const char *in_codec_name = "?";
3288 const char *encoder_name = "?";
3289 const char *out_codec_name = "?";
3290 const AVCodecDescriptor *desc;
3293 decoder_name = in_codec->name;
3294 desc = avcodec_descriptor_get(in_codec->id);
3296 in_codec_name = desc->name;
3297 if (!strcmp(decoder_name, in_codec_name))
3298 decoder_name = "native";
3302 encoder_name = out_codec->name;
3303 desc = avcodec_descriptor_get(out_codec->id);
3305 out_codec_name = desc->name;
3306 if (!strcmp(encoder_name, out_codec_name))
3307 encoder_name = "native";
3310 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3311 in_codec_name, decoder_name,
3312 out_codec_name, encoder_name);
3314 av_log(NULL, AV_LOG_INFO, "\n");
3318 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3322 if (sdp_filename || want_sdp) {
3326 transcode_init_done = 1;
3331 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3332 static int need_output(void)
3336 for (i = 0; i < nb_output_streams; i++) {
3337 OutputStream *ost = output_streams[i];
3338 OutputFile *of = output_files[ost->file_index];
3339 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* skip streams that are done, or whose file hit the -fs size limit */
3341 if (ost->finished ||
3342 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* frame-count limit reached: close every stream of this output file */
3344 if (ost->frame_number >= ost->max_frames) {
3346 for (j = 0; j < of->ctx->nb_streams; j++)
3347 close_output_stream(output_streams[of->ost_index + j]);
3358 * Select the output stream to process.
3360 * @return selected output stream, or NULL if none available
3362 static OutputStream *choose_output(void)
3365 int64_t opts_min = INT64_MAX;
3366 OutputStream *ost_min = NULL;
/* pick the unfinished stream with the smallest muxing DTS (rescaled to a
 * common time base) so all outputs advance roughly in step */
3368 for (i = 0; i < nb_output_streams; i++) {
3369 OutputStream *ost = output_streams[i];
3370 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3372 if (!ost->finished && opts < opts_min) {
/* an "unavailable" winner yields NULL so the caller can wait and retry */
3374 ost_min = ost->unavailable ? NULL : ost;
/* Poll the terminal (at most every 100ms, and not when daemonized) for an
 * interactive command key and act on it. Returns AVERROR_EXIT to request
 * shutdown (signal received or quit key), 0 otherwise.
 * NOTE(review): many lines are elided in this excerpt (e.g. the read_key()
 * call and the 'q'/'h' branches); confirm the full dispatch upstream. */
3380 static int check_keyboard_interaction(int64_t cur_time)
3383 static int64_t last_time;
3384 if (received_nb_signals)
3385 return AVERROR_EXIT;
3386 /* read_key() returns 0 on EOF */
3387 if(cur_time - last_time >= 100000 && !run_as_daemon){
3389 last_time = cur_time;
3393 return AVERROR_EXIT;
/* '+'/'-' adjust the global log level in steps of 10. */
3394 if (key == '+') av_log_set_level(av_log_get_level()+10);
3395 if (key == '-') av_log_set_level(av_log_get_level()-10);
3396 if (key == 's') qp_hist ^= 1;
3399 do_hex_dump = do_pkt_dump = 0;
3400 } else if(do_pkt_dump){
3404 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target>|all <time>|-1 <command>[ <argument>]" from stdin
 * and send ('c': first matching filter only) or queue ('C') the command on
 * every filtergraph. */
3406 if (key == 'c' || key == 'C'){
3407 char buf[4096], target[64], command[256], arg[256] = {0};
3410 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3412 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3417 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3418 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3419 target, time, command, arg);
3420 for (i = 0; i < nb_filtergraphs; i++) {
3421 FilterGraph *fg = filtergraphs[i];
3424 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3425 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3426 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3427 } else if (key == 'c') {
3428 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3429 ret = AVERROR_PATCHWELCOME;
3431 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3433 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3438 av_log(NULL, AV_LOG_ERROR,
3439 "Parse error, at least 3 arguments were expected, "
3440 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle ('d') or prompt for ('D', elided here) a codec debug mask,
 * applied to every input decoder and output encoder context. */
3443 if (key == 'd' || key == 'D'){
3446 debug = input_streams[0]->st->codec->debug<<1;
3447 if(!debug) debug = 1;
3448 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3454 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3458 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3459 fprintf(stderr,"error parsing debug value\n");
3461 for(i=0;i<nb_input_streams;i++) {
3462 input_streams[i]->st->codec->debug = debug;
3464 for(i=0;i<nb_output_streams;i++) {
3465 OutputStream *ost = output_streams[i];
3466 ost->enc_ctx->debug = debug;
3468 if(debug) av_log_set_level(AV_LOG_DEBUG);
3469 fprintf(stderr,"debug=%d\n", debug);
/* '?' (presumably; the condition line is elided): print the key help. */
3472 fprintf(stderr, "key function\n"
3473 "? show this help\n"
3474 "+ increase verbosity\n"
3475 "- decrease verbosity\n"
3476 "c Send command to first matching filter supporting it\n"
3477 "C Send/Que command to all matching filters\n"
3478 "D cycle through available debug modes\n"
3479 "h dump packets/hex press to cycle through the 3 states\n"
3481 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is the InputFile*): loops reading packets
 * with av_read_frame() and forwards them to the main thread through
 * f->in_thread_queue. On any terminal read error the error is propagated to
 * the receiving side via av_thread_message_queue_set_err_recv(). */
3488 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable inputs so a full queue does
 * not stall the demuxer (see init_input_threads). */
3491 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3496 ret = av_read_frame(f->ctx, &pkt);
3498 if (ret == AVERROR(EAGAIN)) {
3503 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate the packet so its data outlives the demuxer's internal buffer
 * once it crosses the thread boundary. */
3506 av_dup_packet(&pkt);
3507 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* First non-blocking send hit a full queue: warn once, then retry blocking
 * (the retry's flags are elided in this excerpt — confirm upstream). */
3508 if (flags && ret == AVERROR(EAGAIN)) {
3510 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3511 av_log(f->ctx, AV_LOG_WARNING,
3512 "Thread message queue blocking; consider raising the "
3513 "thread_queue_size option (current value: %d)\n",
3514 f->thread_queue_size);
3517 if (ret != AVERROR_EOF)
3518 av_log(f->ctx, AV_LOG_ERROR,
3519 "Unable to send packet to main thread: %s\n",
3521 av_free_packet(&pkt);
3522 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reap every input demuxer thread: signal EOF to the sender
 * side, drain (and free) any queued packets so the thread can finish its
 * pending send, join the thread, then free the queue itself. */
3530 static void free_input_threads(void)
3534 for (i = 0; i < nb_input_files; i++) {
3535 InputFile *f = input_files[i];
/* Skip files that never started a thread (e.g. single-input runs). */
3538 if (!f || !f->in_thread_queue)
3540 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3541 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3542 av_free_packet(&pkt);
3544 pthread_join(f->thread, NULL);
3546 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file, each with its own message queue.
 * No threads are started for a single input (the main loop reads directly).
 * Returns 0 on success or a negative AVERROR on allocation/thread failure. */
3550 static int init_input_threads(void)
3554 if (nb_input_files == 1)
3557 for (i = 0; i < nb_input_files; i++) {
3558 InputFile *f = input_files[i];
/* Use non-blocking queue sends for non-seekable inputs (live sources),
 * except lavfi, so a slow consumer never blocks a real-time demuxer. */
3560 if (f->ctx->pb ? !f->ctx->pb->seekable :
3561 strcmp(f->ctx->iformat->name, "lavfi"))
3562 f->non_blocking = 1;
3563 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3564 f->thread_queue_size, sizeof(AVPacket));
3568 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3569 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3570 av_thread_message_queue_free(&f->in_thread_queue);
3571 return AVERROR(ret);
/* Multi-threaded variant of get_input_packet(): pop the next packet from this
 * file's demuxer-thread queue, non-blocking when the file is marked non_blocking
 * (the condition expression is elided between these lines). */
3577 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3579 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3581 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file f. With -re style rate limiting
 * (guard condition elided here) it compares each stream's dts (rescaled to
 * microseconds) against wall-clock time and returns EAGAIN to throttle
 * reading to realtime. With multiple inputs it delegates to the per-file
 * demuxer thread; otherwise it reads directly from the demuxer. */
3585 static int get_input_packet(InputFile *f, AVPacket *pkt)
3589 for (i = 0; i < f->nb_streams; i++) {
3590 InputStream *ist = input_streams[f->ist_index + i];
3591 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3592 int64_t now = av_gettime_relative() - ist->start;
3594 return AVERROR(EAGAIN);
3599 if (nb_input_files > 1)
3600 return get_input_packet_mt(f, pkt);
3602 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode_step hit EAGAIN somewhere. */
3605 static int got_eagain(void)
3608 for (i = 0; i < nb_output_streams; i++)
3609 if (output_streams[i]->unavailable)
/* Clear all EAGAIN markers: per-input-file eagain flags and per-output-stream
 * unavailable flags, so the main loop can retry every stream. */
3614 static void reset_eagain(void)
3617 for (i = 0; i < nb_input_files; i++)
3618 input_files[i]->eagain = 0;
3619 for (i = 0; i < nb_output_streams; i++)
3620 output_streams[i]->unavailable = 0;
/* Read one packet from input file file_index and feed it to the decoders /
 * stream-copy path, after fixing up its timestamps.
 * Return value: */
3625 * - 0 -- one packet was read and processed
3626 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3627 * this function should be called again
3628 * - AVERROR_EOF -- this function should not be called again
3630 static int process_input(int file_index)
3632 InputFile *ifile = input_files[file_index];
3633 AVFormatContext *is;
3639 ret = get_input_packet(ifile, &pkt);
3641 if (ret == AVERROR(EAGAIN)) {
3646 if (ret != AVERROR_EOF) {
3647 print_error(is->filename, ret);
/* EOF: flush each decoder (NULL packet) and finish every output stream that
 * is fed directly (stream copy or subtitles) rather than through lavfi. */
3652 for (i = 0; i < ifile->nb_streams; i++) {
3653 ist = input_streams[ifile->ist_index + i];
3654 if (ist->decoding_needed) {
3655 ret = process_input_packet(ist, NULL);
3660 /* mark all outputs that don't go through lavfi as finished */
3661 for (j = 0; j < nb_output_streams; j++) {
3662 OutputStream *ost = output_streams[j];
3664 if (ost->source_index == ifile->ist_index + i &&
3665 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3666 finish_output_stream(ost);
/* EOF is reported as EAGAIN once so the caller re-evaluates which file to
 * read next instead of aborting the loop immediately. */
3670 ifile->eof_reached = 1;
3671 return AVERROR(EAGAIN);
3677 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3678 is->streams[pkt.stream_index]);
3680 /* the following test is needed in case new streams appear
3681 dynamically in stream : we ignore them */
3682 if (pkt.stream_index >= ifile->nb_streams) {
3683 report_new_stream(file_index, &pkt);
3684 goto discard_packet;
3687 ist = input_streams[ifile->ist_index + pkt.stream_index];
3689 ist->data_size += pkt.size;
3693 goto discard_packet;
3696 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3697 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3698 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3699 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3700 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3701 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3702 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3703 av_ts2str(input_files[ist->file_index]->ts_offset),
3704 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits: detect a
 * value past the wrap midpoint and subtract one wrap period. */
3707 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3708 int64_t stime, stime2;
3709 // Correcting starttime based on the enabled streams
3710 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3711 // so we instead do it here as part of discontinuity handling
3712 if ( ist->next_dts == AV_NOPTS_VALUE
3713 && ifile->ts_offset == -is->start_time
3714 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3715 int64_t new_start_time = INT64_MAX;
3716 for (i=0; i<is->nb_streams; i++) {
3717 AVStream *st = is->streams[i];
3718 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3720 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3722 if (new_start_time > is->start_time) {
3723 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3724 ifile->ts_offset = -new_start_time;
3728 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3729 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3730 ist->wrap_correction_done = 1;
/* If a timestamp still sits beyond the half-wrap point, the correction is
 * not final: unwrap it and keep wrap_correction_done cleared for retry. */
3732 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3733 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3734 ist->wrap_correction_done = 0;
3736 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3737 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3738 ist->wrap_correction_done = 0;
3742 /* add the stream-global side data to the first packet */
3743 if (ist->nb_packets == 1) {
3744 if (ist->st->nb_side_data)
3745 av_packet_split_side_data(&pkt);
3746 for (i = 0; i < ist->st->nb_side_data; i++) {
3747 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries, and the display matrix when
 * autorotate will handle rotation in the filtergraph instead. */
3750 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3752 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3755 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3759 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file ts_offset, then the user-specified -itsscale factor. */
3763 if (pkt.dts != AV_NOPTS_VALUE)
3764 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3765 if (pkt.pts != AV_NOPTS_VALUE)
3766 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3768 if (pkt.pts != AV_NOPTS_VALUE)
3769 pkt.pts *= ist->ts_scale;
3770 if (pkt.dts != AV_NOPTS_VALUE)
3771 pkt.dts *= ist->ts_scale;
/* Discontinuity handling, case 1: first dts of this stream vs. the file's
 * last seen timestamp (inter-stream discontinuity on TS_DISCONT formats). */
3773 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3774 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3775 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3776 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3777 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3778 int64_t delta = pkt_dts - ifile->last_ts;
3779 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3780 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3781 ifile->ts_offset -= delta;
3782 av_log(NULL, AV_LOG_DEBUG,
3783 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3784 delta, ifile->ts_offset);
3785 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3786 if (pkt.pts != AV_NOPTS_VALUE)
3787 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Discontinuity handling, case 2: dts vs. this stream's predicted next_dts.
 * TS_DISCONT formats get the offset adjusted; otherwise (branch elided)
 * out-of-range timestamps are dropped against dts_error_threshold. */
3791 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3792 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3793 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3795 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3796 int64_t delta = pkt_dts - ist->next_dts;
3797 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3798 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3799 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3800 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3801 ifile->ts_offset -= delta;
3802 av_log(NULL, AV_LOG_DEBUG,
3803 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3804 delta, ifile->ts_offset);
3805 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3806 if (pkt.pts != AV_NOPTS_VALUE)
3807 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3810 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3811 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3812 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3813 pkt.dts = AV_NOPTS_VALUE;
3815 if (pkt.pts != AV_NOPTS_VALUE){
3816 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3817 delta = pkt_pts - ist->next_dts;
3818 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3819 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3820 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3821 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for the next inter-stream check. */
3827 if (pkt.dts != AV_NOPTS_VALUE)
3828 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3831 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3832 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3833 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3834 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3835 av_ts2str(input_files[ist->file_index]->ts_offset),
3836 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3839 sub2video_heartbeat(ist, pkt.pts);
3841 process_input_packet(ist, &pkt);
3844 av_free_packet(&pkt);
3850 * Perform a step of transcoding for the specified filter graph.
3852 * @param[in] graph filter graph to consider
3853 * @param[out] best_ist input stream where a frame would allow to continue
3854 * @return 0 for success, <0 for error
/* NOTE(review): on success (oldest request served) it reaps filtered frames;
 * on EOF it reaps once more and closes every output of the graph; on EAGAIN
 * it picks the input filter with the most failed buffersrc requests as
 * *best_ist so the caller knows which input to demux next. Several lines
 * (the *best_ist assignment among them) are elided in this excerpt. */
3856 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3859 int nb_requests, nb_requests_max = 0;
3860 InputFilter *ifilter;
3864 ret = avfilter_graph_request_oldest(graph->graph);
3866 return reap_filters(0);
3868 if (ret == AVERROR_EOF) {
3869 ret = reap_filters(1);
3870 for (i = 0; i < graph->nb_outputs; i++)
3871 close_output_stream(graph->outputs[i]->ost);
3874 if (ret != AVERROR(EAGAIN))
3877 for (i = 0; i < graph->nb_inputs; i++) {
3878 ifilter = graph->inputs[i];
/* Inputs that are exhausted or temporarily unavailable cannot help. */
3880 if (input_files[ist->file_index]->eagain ||
3881 input_files[ist->file_index]->eof_reached)
3883 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3884 if (nb_requests > nb_requests_max) {
3885 nb_requests_max = nb_requests;
/* No usable input: mark all this graph's outputs unavailable for now. */
3891 for (i = 0; i < graph->nb_outputs; i++)
3892 graph->outputs[i]->ost->unavailable = 1;
3898 * Run a single step of transcoding.
3900 * @return 0 for success, <0 for error
/* Chooses the neediest output stream, resolves which input stream must
 * supply data (directly or via its filtergraph), demuxes one packet from
 * that input, then reaps any frames the filtergraphs produced. */
3902 static int transcode_step(void)
3908 ost = choose_output();
3915 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3920 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3925 av_assert0(ost->source_index >= 0);
3926 ist = input_streams[ost->source_index];
3929 ret = process_input(ist->file_index);
/* EAGAIN from a file already flagged eagain means this output can't make
 * progress right now — mark it unavailable so choose_output skips it. */
3930 if (ret == AVERROR(EAGAIN)) {
3931 if (input_files[ist->file_index]->eagain)
3932 ost->unavailable = 1;
/* EOF of one input is not an error for the overall step. */
3937 return ret == AVERROR_EOF ? 0 : ret;
3939 return reap_filters(0);
3943 * The following code is the main loop of the file converter
/* Orchestrates the whole run: init, optional input threads, the step loop
 * (with keyboard handling and progress reports), then flush, trailers, and
 * per-stream teardown. Returns 0 on success, negative AVERROR otherwise. */
3945 static int transcode(void)
3948 AVFormatContext *os;
3951 int64_t timer_start;
3953 ret = transcode_init();
3957 if (stdin_interaction) {
3958 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3961 timer_start = av_gettime_relative();
3964 if ((ret = init_input_threads()) < 0)
3968 while (!received_sigterm) {
3969 int64_t cur_time= av_gettime_relative();
3971 /* if 'q' pressed, exits */
3972 if (stdin_interaction)
3973 if (check_keyboard_interaction(cur_time) < 0)
3976 /* check if there's any stream where output is still needed */
3977 if (!need_output()) {
3978 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3982 ret = transcode_step();
/* EOF/EAGAIN from a step are recoverable; other errors end the loop
 * with a diagnostic (the loop-exit lines are elided in this excerpt). */
3984 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3988 av_strerror(ret, errbuf, sizeof(errbuf));
3990 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3995 /* dump report by using the output first video and audio streams */
3996 print_report(0, timer_start, cur_time);
3999 free_input_threads();
4002 /* at the end of stream, we must flush the decoder buffers */
4003 for (i = 0; i < nb_input_streams; i++) {
4004 ist = input_streams[i];
4005 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4006 process_input_packet(ist, NULL);
4013 /* write the trailer if needed and close file */
4014 for (i = 0; i < nb_output_files; i++) {
4015 os = output_files[i]->ctx;
4016 av_write_trailer(os);
4019 /* dump report by using the first video and audio streams */
4020 print_report(1, timer_start, av_gettime_relative());
4022 /* close each encoder */
4023 for (i = 0; i < nb_output_streams; i++) {
4024 ost = output_streams[i];
4025 if (ost->encoding_needed) {
4026 av_freep(&ost->enc_ctx->stats_in);
4030 /* close each decoder */
4031 for (i = 0; i < nb_input_streams; i++) {
4032 ist = input_streams[i];
4033 if (ist->decoding_needed) {
4034 avcodec_close(ist->dec_ctx);
4035 if (ist->hwaccel_uninit)
4036 ist->hwaccel_uninit(ist->dec_ctx);
/* Error path (label elided): threads are freed again here; presumably safe
 * because free_input_threads() skips files with no queue — confirm. */
4045 free_input_threads();
4048 if (output_streams) {
4049 for (i = 0; i < nb_output_streams; i++) {
4050 ost = output_streams[i];
4053 fclose(ost->logfile);
4054 ost->logfile = NULL;
4056 av_freep(&ost->forced_kf_pts);
4057 av_freep(&ost->apad);
4058 av_freep(&ost->disposition);
4059 av_dict_free(&ost->encoder_opts);
4060 av_dict_free(&ost->sws_dict);
4061 av_dict_free(&ost->swr_opts);
4062 av_dict_free(&ost->resample_opts);
4063 av_dict_free(&ost->bsf_args);
/* Return this process's consumed user CPU time in microseconds, using
 * getrusage (POSIX), GetProcessTimes (Windows), or wall clock as fallback.
 * (The opening #if for the getrusage branch is elided in this excerpt.) */
4071 static int64_t getutime(void)
4074 struct rusage rusage;
4076 getrusage(RUSAGE_SELF, &rusage);
4077 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4078 #elif HAVE_GETPROCESSTIMES
4080 FILETIME c, e, k, u;
4081 proc = GetCurrentProcess();
4082 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
4083 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4085 return av_gettime_relative();
/* Return the peak memory usage of this process in bytes, via getrusage
 * (ru_maxrss is in KiB, hence *1024) or GetProcessMemoryInfo on Windows;
 * the final fallback branch is elided in this excerpt. */
4089 static int64_t getmaxrss(void)
4091 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4092 struct rusage rusage;
4093 getrusage(RUSAGE_SELF, &rusage);
4094 return (int64_t)rusage.ru_maxrss * 1024;
4095 #elif HAVE_GETPROCESSMEMORYINFO
4097 PROCESS_MEMORY_COUNTERS memcounters;
4098 proc = GetCurrentProcess();
4099 memcounters.cb = sizeof(memcounters);
4100 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4101 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in daemon mode ("-d") to silence av_log
 * output (body elided in this excerpt; presumably empty — confirm). */
4107 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and exit handling, register all
 * codecs/devices/filters, parse the command line, run transcode(), then
 * report benchmarks and exit with the appropriate status code. */
4111 int main(int argc, char **argv)
4116 register_exit(ffmpeg_cleanup);
4118 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4120 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4121 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode — silence logging entirely. */
4123 if(argc>1 && !strcmp(argv[1], "-d")){
4125 av_log_set_callback(log_callback_null);
4130 avcodec_register_all();
4132 avdevice_register_all();
4134 avfilter_register_all();
4136 avformat_network_init();
4138 show_banner(argc, argv, options);
4142 /* parse options and open all input/output files */
4143 ret = ffmpeg_parse_options(argc, argv);
4147 if (nb_output_files <= 0 && nb_input_files == 0) {
4149 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4153 /* file converter / grab */
4154 if (nb_output_files <= 0) {
4155 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Input-less invocations are deliberately tolerated (check commented out). */
4159 // if (nb_input_files == 0) {
4160 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4164 current_time = ti = getutime();
4165 if (transcode() < 0)
4167 ti = getutime() - ti;
4169 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4171 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4172 decode_error_stat[0], decode_error_stat[1]);
/* Abort (branch body elided) when the decode error ratio exceeds -max_error_rate. */
4173 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by user interrupt; otherwise the recorded code. */
4176 exit_program(received_nb_signals ? 255 : main_return_code);
4177 return main_return_code;