2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
114 static FILE *vstats_file;
116 const char *const forced_keyframes_const_names[] = {
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
129 static int run_as_daemon = 0;
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
137 static uint8_t *subtitle_out;
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
149 FilterGraph **filtergraphs;
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
156 static int restore_tty;
160 static void free_input_threads(void);
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
168 static int sub2video_get_blank_frame(InputStream *ist)
/* Reset ist->sub2video.frame to a blank RGB32 canvas sized from the
 * decoder (falling back to the pre-computed sub2video.w/h when the
 * decoder reports no dimensions).  Returns <0 on allocation failure.
 * NOTE(review): several lines of this function are missing from this
 * excerpt (braces, the `ret` declaration, the error-return path). */
171 AVFrame *frame = ist->sub2video.frame;
173 av_frame_unref(frame);
174 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
176 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment requested from av_frame_get_buffer(). */
177 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* All-zero RGB32 is fully transparent black: clear the whole canvas. */
179 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
/* Blit one subtitle rectangle into the RGB32 canvas `dst` (w x h,
 * dst_linesize bytes per row).  Non-bitmap rectangles and rectangles
 * falling outside the canvas are rejected with a warning (the early
 * returns after each warning are outside this excerpt). */
186 uint32_t *pal, *dst2;
190 if (r->type != SUBTITLE_BITMAP) {
191 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196 r->x, r->y, r->w, r->h, w, h
/* Position at the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
201 dst += r->y * dst_linesize + r->x * 4;
202 src = r->pict.data[0];
203 pal = (uint32_t *)r->pict.data[1];
/* Expand each 8-bit palette index (data[0]) through the palette
 * (data[1]) into a 32-bit pixel, one row at a time. */
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
208 *(dst2++) = pal[*(src2++)];
210 src += r->pict.linesize[0];
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
/* Stamp the sub2video frame with `pts`, remember it as last_pts, and
 * push the frame (keeping our own reference) into every filtergraph
 * buffer source fed by this input stream. */
216 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
/* Render `sub` onto a fresh blank canvas and push it to the filters.
 * `sub` may be NULL (callers sub2video_heartbeat/sub2video_flush pass
 * NULL to emit an empty clearing frame); the NULL branch presumably
 * takes the `pts = ist->sub2video.end_pts` path below -- the guard is
 * outside this excerpt, TODO confirm. */
229 AVFrame *frame = ist->sub2video.frame;
233 int64_t pts, end_pts;
/* Convert the display window (start/end_display_time are in ms relative
 * to sub->pts, which is in AV_TIME_BASE units) to stream time base. */
238 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 num_rects = sub->num_rects;
244 pts = ist->sub2video.end_pts;
248 if (sub2video_get_blank_frame(ist) < 0) {
249 av_log(ist->dec_ctx, AV_LOG_ERROR,
250 "Impossible to get a blank canvas.\n");
253 dst = frame->data [0];
254 dst_linesize = frame->linesize[0];
/* Blit every rectangle, then publish the frame and remember when it
 * should stop being displayed. */
255 for (i = 0; i < num_rects; i++)
256 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257 sub2video_push_ref(ist, pts);
258 ist->sub2video.end_pts = end_pts;
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
/* Keep sub2video streams of the same input file "alive": re-send the
 * current subtitle frame so downstream filters (e.g. overlay) are not
 * starved while waiting for the next real subtitle packet. */
263 InputFile *infile = input_files[ist->file_index];
267 /* When a frame is read from a file, examine all sub2video streams in
268 the same file and send the sub2video frame again. Otherwise, decoded
269 video frames could be accumulating in the filter graph while a filter
270 (possibly overlay) is desperately waiting for a subtitle frame. */
271 for (i = 0; i < infile->nb_streams; i++) {
272 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams (continue is outside
 * this excerpt). */
273 if (!ist2->sub2video.frame)
275 /* subtitles seem to be usually muxed ahead of other streams;
276 if not, subtracting a larger time here is necessary */
277 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278 /* do not send the heartbeat frame if the subtitle is already ahead */
279 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired (or no canvas yet): render an empty frame. */
281 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282 sub2video_update(ist2, NULL)
/* Only push the heartbeat when some filter actually failed a request
 * (nb_reqs > 0 check is outside this excerpt). */;
283 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286 sub2video_push_ref(ist2, pts2);
290 static void sub2video_flush(InputStream *ist)
/* End-of-stream handling for a sub2video stream: emit a final clearing
 * frame if a subtitle is still displayed, then send EOF (NULL frame)
 * to every attached buffer source. */
294 if (ist->sub2video.end_pts < INT64_MAX)
295 sub2video_update(ist, NULL);
296 for (i = 0; i < ist->nb_filters; i++)
297 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
300 /* end of sub2video hack */
302 static void term_exit_sigsafe(void)
/* Restore the terminal attributes saved in oldtty.  Intended to be
 * safe to call from a signal handler; the restore_tty guard around
 * tcsetattr() is outside this excerpt. */
306 tcsetattr (0, TCSANOW, &oldtty);
/* Flush av_log's internal state without printing anything. */
312 av_log(NULL, AV_LOG_QUIET, "%s", "");
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
318 static volatile int transcode_init_done = 0;
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
/* POSIX signal handler: record which signal arrived and how many have
 * been received so the main loop can shut down gracefully. */
323 sigterm_handler(int sig)
325 received_sigterm = sig;
326 received_nb_signals++;
328 if(received_nb_signals > 3) {
/* After more than 3 signals assume graceful shutdown is stuck:
 * write() is async-signal-safe; the hard exit that follows is
 * outside this excerpt. */
329 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
/* Windows console control handler: translate console events into the
 * same sigterm_handler() path used on POSIX systems. */
339 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* CTRL_C_EVENT presumably handled by a case just above this excerpt. */
344 case CTRL_BREAK_EVENT:
345 sigterm_handler(SIGINT);
348 case CTRL_CLOSE_EVENT:
349 case CTRL_LOGOFF_EVENT:
350 case CTRL_SHUTDOWN_EVENT:
351 sigterm_handler(SIGTERM);
352 /* Basically, with these 3 events, when we return from this method the
353 process is hard terminated, so stall as long as we need to
354 to try and let the main thread(s) clean up and gracefully terminate
355 (we have at most 5 seconds, but should be done far before that). */
356 while (!ffmpeg_exited) {
/* default: unknown event -- log and fall through to the return below. */
362 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Interior of term_init() (signature outside this excerpt): put the
 * controlling terminal into raw-ish mode so single keypresses can be
 * read, and install the signal handlers. */
375 istty = isatty(0) && isatty(2);
377 if (istty && tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control, keep output processing,
 * turn off echo and canonical (line-buffered) mode. */
381 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
382 |INLCR|IGNCR|ICRNL|IXON);
383 tty.c_oflag |= OPOST;
384 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
385 tty.c_cflag &= ~(CSIZE|PARENB);
390 tcsetattr (0, TCSANOW, &tty);
392 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
396 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
397 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU time limit exceeded -- shut down cleanly too. */
399 signal(SIGXCPU, sigterm_handler);
401 #if HAVE_SETCONSOLECTRLHANDLER
402 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
406 /* read a key without blocking */
407 static int read_key(void)
/* POSIX path: poll stdin with select() using a (presumably zero)
 * timeout set up outside this excerpt. */
419 n = select(1, &rfds, NULL, NULL, &tv);
428 # if HAVE_PEEKNAMEDPIPE
430 static HANDLE input_handle;
/* Windows path: distinguish a console from a redirected pipe. */
433 input_handle = GetStdHandle(STD_INPUT_HANDLE);
434 is_pipe = !GetConsoleMode(input_handle, &dw);
438 /* When running under a GUI, you will end here. */
439 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
440 // input pipe may have been closed by the program that ran ffmpeg
458 static int decode_interrupt_cb(void *ctx)
/* AVIO interrupt callback: request abort of blocking I/O once more
 * signals have arrived than transcode_init_done tolerates (any signal
 * after init is done, or a second signal during init). */
460 return received_nb_signals > transcode_init_done;
463 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
465 static void ffmpeg_cleanup(int ret)
/* Global teardown, run at exit: free all filtergraphs, output files and
 * streams, input files and streams, then report how the run ended.
 * `ret` is the process exit status being propagated. */
470 int maxrss = getmaxrss() / 1024;
471 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, all input/output pads, description. */
474 for (i = 0; i < nb_filtergraphs; i++) {
475 FilterGraph *fg = filtergraphs[i];
476 avfilter_graph_free(&fg->graph);
477 for (j = 0; j < fg->nb_inputs; j++) {
478 av_freep(&fg->inputs[j]->name);
479 av_freep(&fg->inputs[j]);
481 av_freep(&fg->inputs);
482 for (j = 0; j < fg->nb_outputs; j++) {
483 av_freep(&fg->outputs[j]->name);
484 av_freep(&fg->outputs[j]);
486 av_freep(&fg->outputs);
487 av_freep(&fg->graph_desc);
489 av_freep(&filtergraphs[i]);
491 av_freep(&filtergraphs);
493 av_freep(&subtitle_out);
/* --- output files: close the AVIO context for file-backed muxers,
 * free the format context and per-file options. */
496 for (i = 0; i < nb_output_files; i++) {
497 OutputFile *of = output_files[i];
502 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504 avformat_free_context(s);
505 av_dict_free(&of->opts);
507 av_freep(&output_files[i]);
/* --- output streams: walk and close the bitstream-filter chain, then
 * free frames, parser, keyframe-forcing state and the encoder context. */
509 for (i = 0; i < nb_output_streams; i++) {
510 OutputStream *ost = output_streams[i];
511 AVBitStreamFilterContext *bsfc;
516 bsfc = ost->bitstream_filters;
518 AVBitStreamFilterContext *next = bsfc->next;
519 av_bitstream_filter_close(bsfc);
522 ost->bitstream_filters = NULL;
523 av_frame_free(&ost->filtered_frame);
524 av_frame_free(&ost->last_frame);
526 av_parser_close(ost->parser);
528 av_freep(&ost->forced_keyframes);
529 av_expr_free(ost->forced_keyframes_pexpr);
530 av_freep(&ost->avfilter);
531 av_freep(&ost->logfile_prefix);
533 av_freep(&ost->audio_channels_map);
534 ost->audio_channels_mapped = 0;
536 avcodec_free_context(&ost->enc_ctx);
538 av_freep(&output_streams[i]);
/* Stop demuxer threads before tearing down input files/streams. */
541 free_input_threads();
543 for (i = 0; i < nb_input_files; i++) {
544 avformat_close_input(&input_files[i]->ctx);
545 av_freep(&input_files[i]);
547 for (i = 0; i < nb_input_streams; i++) {
548 InputStream *ist = input_streams[i];
550 av_frame_free(&ist->decoded_frame);
551 av_frame_free(&ist->filter_frame);
552 av_dict_free(&ist->decoder_opts);
553 avsubtitle_free(&ist->prev_sub.subtitle);
554 av_frame_free(&ist->sub2video.frame);
555 av_freep(&ist->filters);
556 av_freep(&ist->hwaccel_device);
558 avcodec_free_context(&ist->dec_ctx);
560 av_freep(&input_streams[i]);
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
/* Final status line: a received signal counts as a normal exit. */
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
/* Delete from *a every option whose key also appears in b (used to
 * strip already-consumed options before the leftover check). */
588 AVDictionaryEntry *t = NULL;
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
595 void assert_avoptions(AVDictionary *m)
/* Abort with a fatal log if any unconsumed option remains in m,
 * i.e. the user supplied an option nothing recognized. */
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-exit helper for experimental codecs; body outside this excerpt. */
604 static void abort_codec_experimental(AVCodec *c, int encoder)
609 static void update_benchmark(const char *fmt, ...)
/* -benchmark_all support: when fmt is non-NULL, log the user CPU time
 * consumed since the previous call under the given printf-style label;
 * in all cases reset the reference timestamp (the current_time update
 * is outside this excerpt). */
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
/* Mark every output stream finished: `this_stream` flags for `ost`
 * itself, `others` flags for all remaining streams. */
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
/* Final muxing step for one packet: propagate extradata, apply frame
 * limits, collect quality stats, run the bitstream-filter chain, fix up
 * non-monotonic timestamps, then hand the packet to the interleaving
 * muxer.  Intricate and order-dependent; several lines are missing from
 * this excerpt. */
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Copy encoder extradata to the muxer codec context if absent. */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* vsync drop / async modes discard timestamps entirely. */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
/* Extract encoder quality/error side data for stats reporting. */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
683 av_packet_split_side_data(pkt);
/* Run each bitstream filter in the chain; on success with new data,
 * copy it into a padded buffer owned by the packet. */
686 AVPacket new_pkt = *pkt;
687 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
690 int a = av_bitstream_filter_filter(bsfc, avctx,
691 bsf_arg ? bsf_arg->value : NULL,
692 &new_pkt.data, &new_pkt.size,
693 pkt->data, pkt->size,
694 pkt->flags & AV_PKT_FLAG_KEY);
695 if(a == 0 && new_pkt.data != pkt->data) {
696 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
698 memcpy(t, new_pkt.data, new_pkt.size);
699 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
707 pkt->side_data = NULL;
708 pkt->side_data_elems = 0;
710 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
711 av_buffer_default_free, NULL, 0);
716 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
717 bsfc->filter->name, pkt->stream_index,
718 avctx->codec ? avctx->codec->name : "copy");
/* Timestamp sanity: dts must not exceed pts, and dts must increase
 * (strictly, unless the muxer allows non-strict ordering). */
728 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
729 if (pkt->dts != AV_NOPTS_VALUE &&
730 pkt->pts != AV_NOPTS_VALUE &&
731 pkt->dts > pkt->pts) {
732 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
734 ost->file_index, ost->st->index);
/* Replace both with the median of pts, dts and last_mux_dts+1. */
736 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
737 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
738 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
741 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
742 pkt->dts != AV_NOPTS_VALUE &&
743 ost->last_mux_dts != AV_NOPTS_VALUE) {
744 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
745 if (pkt->dts < max) {
746 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
747 av_log(s, loglevel, "Non-monotonous DTS in output stream "
748 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
749 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
751 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
754 av_log(s, loglevel, "changing to %"PRId64". This may result "
755 "in incorrect timestamps in the output file.\n",
757 if(pkt->pts >= pkt->dts)
758 pkt->pts = FFMAX(pkt->pts, max);
763 ost->last_mux_dts = pkt->dts;
765 ost->data_size += pkt->size;
766 ost->packets_written++;
768 pkt->stream_index = ost->index;
/* -debug_ts style trace of what is handed to the muxer. */
771 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
772 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
773 av_get_media_type_string(ost->enc_ctx->codec_type),
774 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
775 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
780 ret = av_interleaved_write_frame(s, pkt);
/* A mux failure finishes every stream so the main loop can stop. */
782 print_error("av_interleaved_write_frame()", ret);
783 main_return_code = 1;
784 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
789 static void close_output_stream(OutputStream *ost)
/* Mark this stream's encoder finished and, when recording-time limits
 * apply, shrink the file's recording_time to the stream's end time so
 * the other streams stop at the same point. */
791 OutputFile *of = output_files[ost->file_index];
793 ost->finished |= ENCODER_FINISHED;
795 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
796 of->recording_time = FFMIN(of->recording_time, end);
800 static int check_recording_time(OutputStream *ost)
/* Return 0 (and close the stream) once the stream's current position
 * (sync_opts relative to first_pts, in encoder time base) reaches the
 * file's -t recording limit; the `return 1` for the in-limit case is
 * outside this excerpt. */
802 OutputFile *of = output_files[ost->file_index];
804 if (of->recording_time != INT64_MAX &&
805 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
806 AV_TIME_BASE_Q) >= 0) {
807 close_output_stream(ost);
813 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
/* Encode one audio frame and mux the resulting packet(s).  sync_opts
 * tracks the next expected sample position in the encoder time base. */
816 AVCodecContext *enc = ost->enc_ctx;
820 av_init_packet(&pkt);
824 if (!check_recording_time(ost))
/* Without a usable pts (or with async disabled) stamp the frame with
 * the running sample counter. */
827 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
828 frame->pts = ost->sync_opts;
829 ost->sync_opts = frame->pts + frame->nb_samples;
830 ost->samples_encoded += frame->nb_samples;
831 ost->frames_encoded++;
833 av_assert0(pkt.size || !pkt.data);
834 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
836 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
837 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
838 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
839 enc->time_base.num, enc->time_base.den);
842 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
843 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
846 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Rescale packet timestamps from encoder to stream time base. */
849 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
852 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
853 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
854 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
855 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
858 write_frame(s, &pkt, ost);
862 static void do_subtitle_out(AVFormatContext *s,
/* Encode one AVSubtitle and mux it.  DVB subtitles are encoded twice
 * (one packet draws, a second clears), hence the nb loop below. */
867 int subtitle_out_max_size = 1024 * 1024;
868 int subtitle_out_size, nb, i;
873 if (sub->pts == AV_NOPTS_VALUE) {
874 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
883 subtitle_out = av_malloc(subtitle_out_max_size);
885 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
890 /* Note: DVB subtitle need one packet to draw them and one other
891 packet to clear them */
892 /* XXX: signal it in the codec context ? */
893 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
898 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
900 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
901 pts -= output_files[ost->file_index]->start_time;
902 for (i = 0; i < nb; i++) {
/* avcodec_encode_subtitle() may clobber num_rects (e.g. for the DVB
 * clearing packet); save and restore it across the call. */
903 unsigned save_num_rects = sub->num_rects;
905 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
906 if (!check_recording_time(ost))
910 // start_display_time is required to be 0
911 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
912 sub->end_display_time -= sub->start_display_time;
913 sub->start_display_time = 0;
917 ost->frames_encoded++;
919 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
920 subtitle_out_max_size, sub);
922 sub->num_rects = save_num_rects;
923 if (subtitle_out_size < 0) {
924 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
928 av_init_packet(&pkt);
929 pkt.data = subtitle_out;
930 pkt.size = subtitle_out_size;
931 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
932 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
933 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
934 /* XXX: the pts correction is handled here. Maybe handling
935 it in the codec would be better */
/* 90 == 90 kHz ticks per millisecond: the draw packet is stamped at
 * display start, the clear packet at display end. */
937 pkt.pts += 90 * sub->start_display_time;
939 pkt.pts += 90 * sub->end_display_time;
942 write_frame(s, &pkt, ost);
946 static void do_video_out(AVFormatContext *s,
/* Core video output path: decide, based on the active vsync mode, how
 * many frames to emit (duplicating or dropping as needed), handle
 * forced keyframes, encode (or pass raw pictures through), and mux.
 * Large, order-sensitive, and partially elided in this excerpt. */
948 AVFrame *next_picture,
951 int ret, format_video_sync;
953 AVCodecContext *enc = ost->enc_ctx;
954 AVCodecContext *mux_enc = ost->st->codec;
955 int nb_frames, nb0_frames, i;
956 double delta, delta0;
959 InputStream *ist = NULL;
960 AVFilterContext *filter = ost->filter->filter;
962 if (ost->source_index >= 0)
963 ist = input_streams[ost->source_index];
/* Estimate the frame duration (in encoder ticks) from the filter
 * output frame rate, capped by the forced output rate if set. */
965 if (filter->inputs[0]->frame_rate.num > 0 &&
966 filter->inputs[0]->frame_rate.den > 0)
967 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
969 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
970 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
972 if (!ost->filters_script &&
976 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
977 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* NULL next_picture (flush): predict dup count from recent history. */
982 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
983 ost->last_nb0_frames[1],
984 ost->last_nb0_frames[2]);
/* delta0: gap between the frame's ideal position and where we are;
 * delta: same gap measured at the frame's end. */
986 delta0 = sync_ipts - ost->sync_opts;
987 delta = delta0 + duration;
989 /* by default, we output a single frame */
/* Resolve VSYNC_AUTO to a concrete sync mode per muxer capabilities. */
993 format_video_sync = video_sync_method;
994 if (format_video_sync == VSYNC_AUTO) {
995 if(!strcmp(s->oformat->name, "avi")) {
996 format_video_sync = VSYNC_VFR;
998 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1000 && format_video_sync == VSYNC_CFR
1001 && input_files[ist->file_index]->ctx->nb_streams == 1
1002 && input_files[ist->file_index]->input_ts_offset == 0) {
1003 format_video_sync = VSYNC_VSCFR;
1005 if (format_video_sync == VSYNC_CFR && copy_ts) {
1006 format_video_sync = VSYNC_VSCFR;
/* Frames arriving "in the past" get their duration clipped. */
1012 format_video_sync != VSYNC_PASSTHROUGH &&
1013 format_video_sync != VSYNC_DROP) {
1014 double cor = FFMIN(-delta0, duration);
1015 if (delta0 < -0.6) {
1016 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1018 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
/* Per-mode decision on how many frames to output this round. */
1024 switch (format_video_sync) {
1026 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1027 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1030 ost->sync_opts = lrint(sync_ipts);
1033 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1034 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1036 } else if (delta < -1.1)
1038 else if (delta > 1.1) {
1039 nb_frames = lrintf(delta);
1041 nb0_frames = lrintf(delta0 - 0.6);
1047 else if (delta > 0.6)
1048 ost->sync_opts = lrint(sync_ipts);
1051 case VSYNC_PASSTHROUGH:
1052 ost->sync_opts = lrint(sync_ipts);
/* Clamp to -frames limit and record dup history. */
1059 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1060 nb0_frames = FFMIN(nb0_frames, nb_frames);
1062 memmove(ost->last_nb0_frames + 1,
1063 ost->last_nb0_frames,
1064 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1065 ost->last_nb0_frames[0] = nb0_frames;
1067 if (nb0_frames == 0 && ost->last_droped) {
1069 av_log(NULL, AV_LOG_VERBOSE,
1070 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1071 ost->frame_number, ost->st->index, ost->last_frame->pts);
1073 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1074 if (nb_frames > dts_error_threshold * 30) {
1075 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1079 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1080 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1082 ost->last_droped = nb_frames == nb0_frames && next_picture;
1084 /* duplicates frame if needed */
1085 for (i = 0; i < nb_frames; i++) {
1086 AVFrame *in_picture;
1087 av_init_packet(&pkt);
/* First nb0_frames iterations re-send the previous frame. */
1091 if (i < nb0_frames && ost->last_frame) {
1092 in_picture = ost->last_frame;
1094 in_picture = next_picture;
1099 in_picture->pts = ost->sync_opts;
1102 if (!check_recording_time(ost))
1104 if (ost->frame_number >= ost->max_frames)
/* Rawvideo shortcut: wrap the AVPicture directly, no encoding. */
1108 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1109 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1110 /* raw pictures are written as AVPicture structure to
1111 avoid any copies. We support temporarily the older
1113 if (in_picture->interlaced_frame)
1114 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1116 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1117 pkt.data = (uint8_t *)in_picture;
1118 pkt.size = sizeof(AVPicture);
1119 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1120 pkt.flags |= AV_PKT_FLAG_KEY;
1122 write_frame(s, &pkt, ost);
1124 int got_packet, forced_keyframe = 0;
1127 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1128 ost->top_field_first >= 0)
1129 in_picture->top_field_first = !!ost->top_field_first;
1131 if (in_picture->interlaced_frame) {
1132 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1133 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1135 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1137 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1139 in_picture->quality = enc->global_quality;
1140 in_picture->pict_type = 0;
/* Forced-keyframe decision: explicit pts list, expression, or
 * "source" (copy keyframe flags from the input). */
1142 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1143 in_picture->pts * av_q2d(enc->time_base) : NAN;
1144 if (ost->forced_kf_index < ost->forced_kf_count &&
1145 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1146 ost->forced_kf_index++;
1147 forced_keyframe = 1;
1148 } else if (ost->forced_keyframes_pexpr) {
1150 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1151 res = av_expr_eval(ost->forced_keyframes_pexpr,
1152 ost->forced_keyframes_expr_const_values, NULL);
1153 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1154 ost->forced_keyframes_expr_const_values[FKF_N],
1155 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1156 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1157 ost->forced_keyframes_expr_const_values[FKF_T],
1158 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1161 forced_keyframe = 1;
1162 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1163 ost->forced_keyframes_expr_const_values[FKF_N];
1164 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1165 ost->forced_keyframes_expr_const_values[FKF_T];
1166 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1169 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1170 } else if ( ost->forced_keyframes
1171 && !strncmp(ost->forced_keyframes, "source", 6)
1172 && in_picture->key_frame==1) {
1173 forced_keyframe = 1;
1176 if (forced_keyframe) {
1177 in_picture->pict_type = AV_PICTURE_TYPE_I;
1178 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1181 update_benchmark(NULL);
1183 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1184 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1185 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1186 enc->time_base.num, enc->time_base.den);
1189 ost->frames_encoded++;
1191 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1192 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1194 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1200 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1201 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1202 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1203 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1206 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1207 pkt.pts = ost->sync_opts;
1209 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1212 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1213 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1214 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1215 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1218 frame_size = pkt.size;
1219 write_frame(s, &pkt, ost);
1221 /* if two pass, output log */
1222 if (ost->logfile && enc->stats_out) {
1223 fprintf(ost->logfile, "%s", enc->stats_out);
1229 * For video, number of frames in == number of packets out.
1230 * But there may be reordering, so we can't throw away frames on encoder
1231 * flush, we need to limit them here, before they go into encoder.
1233 ost->frame_number++;
1235 if (vstats_filename && frame_size)
1236 do_video_stats(ost, frame_size);
/* Keep a reference to the last real frame for future duplication. */
1239 if (!ost->last_frame)
1240 ost->last_frame = av_frame_alloc();
1241 av_frame_unref(ost->last_frame);
1242 if (next_picture && ost->last_frame)
1243 av_frame_ref(ost->last_frame, next_picture);
1245 av_frame_free(&ost->last_frame);
1248 static double psnr(double d)
1250 return -10.0 * log(d) / log(10.0);
1253 static void do_video_stats(OutputStream *ost, int frame_size)
/* Append one line of per-frame statistics (-vstats) to vstats_file:
 * frame number, quantizer, optional PSNR, frame size, running time and
 * bitrates, and picture type. */
1255 AVCodecContext *enc;
1257 double ti1, bitrate, avg_bitrate;
1259 /* this is executed just the first time do_video_stats is called */
1261 vstats_file = fopen(vstats_filename, "w");
1269 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1270 frame_number = ost->st->nb_frames;
1271 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1272 ost->quality / (float)FF_QP2LAMBDA);
/* error[0] holds the encoder's summed squared error; normalize by
 * pixel count * 255^2 before converting to dB. */
1274 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1275 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1277 fprintf(vstats_file,"f_size= %6d ", frame_size);
1278 /* compute pts value */
1279 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1283 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1284 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1285 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1286 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1287 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1291 static void finish_output_stream(OutputStream *ost)
/* Mark this stream -- and (under a condition elided from this excerpt,
 * presumably of->shortest) every stream of the same output file -- as
 * fully finished for both encoder and muxer. */
1293 OutputFile *of = output_files[ost->file_index];
1296 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1299 for (i = 0; i < of->ctx->nb_streams; i++)
1300 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1305 * Get and encode new output from any of the filtergraphs, without causing
1308 * @return 0 for success, <0 for severe errors
1310 static int reap_filters(int flush)
1312 AVFrame *filtered_frame = NULL;
1315 /* Reap all buffers present in the buffer sinks */
1316 for (i = 0; i < nb_output_streams; i++) {
1317 OutputStream *ost = output_streams[i];
1318 OutputFile *of = output_files[ost->file_index];
1319 AVFilterContext *filter;
1320 AVCodecContext *enc = ost->enc_ctx;
1325 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to pull from the sink */
1327 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1328 return AVERROR(ENOMEM);
1330 filtered_frame = ost->filtered_frame;
1333 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means do not ask upstream for more data */
1334 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1335 AV_BUFFERSINK_FLAG_NO_REQUEST);
1337 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1338 av_log(NULL, AV_LOG_WARNING,
1339 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1340 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during flush, push a NULL frame so the video path drains */
1341 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1342 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1346 if (ost->finished) {
1347 av_frame_unref(filtered_frame);
/* compute float_pts in a temporarily finer time base (extra_bits of
 * extra resolution) to keep sub-tick precision for the fps code */
1350 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1351 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1352 AVRational tb = enc->time_base;
1353 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1355 tb.den <<= extra_bits;
1357 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1358 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1359 float_pts /= 1 << extra_bits;
1360 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1361 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1363 filtered_frame->pts =
1364 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1365 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1367 //if (ost->source_index >= 0)
1368 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1370 switch (filter->inputs[0]->type) {
1371 case AVMEDIA_TYPE_VIDEO:
1372 if (!ost->frame_aspect_ratio.num)
1373 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1376 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1377 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1379 enc->time_base.num, enc->time_base.den);
1382 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1384 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized channel count unless the encoder can adapt */
1385 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1386 enc->channels != av_frame_get_channels(filtered_frame)) {
1387 av_log(NULL, AV_LOG_ERROR,
1388 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1391 do_audio_out(of->ctx, ost, filtered_frame);
1394 // TODO support subtitle filters
1398 av_frame_unref(filtered_frame);
/**
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, and (at verbose level) per-stream demux/decode
 * and encode/mux statistics for every input and output file.
 *
 * @param total_size total number of bytes written to the output file(s)
 */
1405 static void print_final_stats(int64_t total_size)
1407 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1408 uint64_t subtitle_size = 0;
1409 uint64_t data_size = 0;
1410 float percent = -1.0;
/* accumulate encoded byte counts per media type */
1414 for (i = 0; i < nb_output_streams; i++) {
1415 OutputStream *ost = output_streams[i];
1416 switch (ost->enc_ctx->codec_type) {
1417 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1418 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1419 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1420 default: other_size += ost->data_size; break;
1422 extra_size += ost->enc_ctx->extradata_size;
1423 data_size += ost->data_size;
/* NOTE(review): mixes the legacy CODEC_FLAG_PASS2 name with the new
 * AV_CODEC_FLAG_PASS1 — presumably AV_CODEC_FLAG_PASS2 was intended;
 * harmless only while both macros share the same value. Confirm. */
1424 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1425 != AV_CODEC_FLAG_PASS1)
/* overhead = bytes the container added on top of the raw payload */
1429 if (data_size && total_size>0 && total_size >= data_size)
1430 percent = 100.0 * (total_size - data_size) / data_size;
1432 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1433 video_size / 1024.0,
1434 audio_size / 1024.0,
1435 subtitle_size / 1024.0,
1436 other_size / 1024.0,
1437 extra_size / 1024.0);
1439 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1441 av_log(NULL, AV_LOG_INFO, "unknown");
1442 av_log(NULL, AV_LOG_INFO, "\n");
1444 /* print verbose per-stream stats */
1445 for (i = 0; i < nb_input_files; i++) {
1446 InputFile *f = input_files[i];
1447 uint64_t total_packets = 0, total_size = 0;
1449 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1450 i, f->ctx->filename);
1452 for (j = 0; j < f->nb_streams; j++) {
1453 InputStream *ist = input_streams[f->ist_index + j];
1454 enum AVMediaType type = ist->dec_ctx->codec_type;
1456 total_size += ist->data_size;
1457 total_packets += ist->nb_packets;
1459 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1460 i, j, media_type_string(type));
1461 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1462 ist->nb_packets, ist->data_size);
1464 if (ist->decoding_needed) {
1465 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1466 ist->frames_decoded);
1467 if (type == AVMEDIA_TYPE_AUDIO)
1468 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1469 av_log(NULL, AV_LOG_VERBOSE, "; ");
1472 av_log(NULL, AV_LOG_VERBOSE, "\n");
1475 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1476 total_packets, total_size);
/* same breakdown for the output side: encoded frames and muxed packets */
1479 for (i = 0; i < nb_output_files; i++) {
1480 OutputFile *of = output_files[i];
1481 uint64_t total_packets = 0, total_size = 0;
1483 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1484 i, of->ctx->filename);
1486 for (j = 0; j < of->ctx->nb_streams; j++) {
1487 OutputStream *ost = output_streams[of->ost_index + j];
1488 enum AVMediaType type = ost->enc_ctx->codec_type;
1490 total_size += ost->data_size;
1491 total_packets += ost->packets_written;
1493 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1494 i, j, media_type_string(type));
1495 if (ost->encoding_needed) {
1496 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1497 ost->frames_encoded);
1498 if (type == AVMEDIA_TYPE_AUDIO)
1499 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1500 av_log(NULL, AV_LOG_VERBOSE, "; ");
1503 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1504 ost->packets_written, ost->data_size);
1506 av_log(NULL, AV_LOG_VERBOSE, "\n");
1509 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1510 total_packets, total_size);
/* warn the user when nothing at all was encoded */
1512 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1513 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1515 av_log(NULL, AV_LOG_WARNING, "\n");
1517 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/**
 * Emit the periodic status line (frame count, fps, q, size, time,
 * bitrate, dup/drop) to stderr/log and, when -progress is in use, a
 * machine-readable key=value report to progress_avio. Rate-limited to
 * one report per ~0.5 s via static last_time, except for the final call.
 *
 * @param is_last_report non-zero for the final report at end of run
 * @param timer_start    wallclock start of transcoding (us)
 * @param cur_time       current wallclock time (us)
 */
1522 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1525 AVBPrint buf_script;
1527 AVFormatContext *oc;
1529 AVCodecContext *enc;
1530 int frame_number, vid, i;
1532 int64_t pts = INT64_MIN + 1;
/* static: persists between calls for rate limiting / QP histogram */
1533 static int64_t last_time = -1;
1534 static int qp_histogram[52];
1535 int hours, mins, secs, us;
1537 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: print at most every 500 ms unless this is the last report */
1540 if (!is_last_report) {
1541 if (last_time == -1) {
1542 last_time = cur_time;
1545 if ((cur_time - last_time) < 500000)
1547 last_time = cur_time;
1551 oc = output_files[0]->ctx;
1553 total_size = avio_size(oc->pb);
1554 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1555 total_size = avio_tell(oc->pb);
1559 av_bprint_init(&buf_script, 0, 1);
1560 for (i = 0; i < nb_output_streams; i++) {
1562 ost = output_streams[i];
1564 if (!ost->stream_copy)
1565 q = ost->quality / (float) FF_QP2LAMBDA;
1567 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1568 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1569 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1570 ost->file_index, ost->index, q);
/* only the first video stream contributes frame/fps to the report */
1572 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1573 float fps, t = (cur_time-timer_start) / 1000000.0;
1575 frame_number = ost->frame_number;
1576 fps = t > 1 ? frame_number / t : 0;
1577 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1578 frame_number, fps < 9.95, fps, q);
1579 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1580 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1581 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1582 ost->file_index, ost->index, q);
1584 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp_histogram counts frames per quantizer; printed as log2 digits */
1588 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1590 for (j = 0; j < 32; j++)
1591 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1594 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1596 double error, error_sum = 0;
1597 double scale, scale_sum = 0;
1599 char type[3] = { 'Y','U','V' };
1600 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1601 for (j = 0; j < 3; j++) {
/* last report: cumulative error over all frames; otherwise per-frame */
1602 if (is_last_report) {
1603 error = enc->error[j];
1604 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1606 error = ost->error[j];
1607 scale = enc->width * enc->height * 255.0 * 255.0;
1613 p = psnr(error / scale);
1614 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1615 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1616 ost->file_index, ost->index, type[j] | 32, p);
1618 p = psnr(error_sum / scale_sum);
1619 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1620 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1621 ost->file_index, ost->index, p);
1625 /* compute min output value */
1626 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1627 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1628 ost->st->time_base, AV_TIME_BASE_Q));
1630 nb_frames_drop += ost->last_droped;
1633 secs = FFABS(pts) / AV_TIME_BASE;
1634 us = FFABS(pts) % AV_TIME_BASE;
1640 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1642 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1644 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1645 "size=%8.0fkB time=", total_size / 1024.0);
1647 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1648 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1649 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1650 (100 * us) / AV_TIME_BASE);
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1654 av_bprintf(&buf_script, "bitrate=N/A\n");
1656 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1657 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1660 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1661 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1662 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1663 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1664 hours, mins, secs, us);
1666 if (nb_frames_dup || nb_frames_drop)
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1668 nb_frames_dup, nb_frames_drop);
1669 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1670 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* '\r' keeps the status on one line; '\n' only for the final report */
1672 if (print_stats || is_last_report) {
1673 const char end = is_last_report ? '\n' : '\r';
1674 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1675 fprintf(stderr, "%s %c", buf, end);
1677 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1682 if (progress_avio) {
1683 av_bprintf(&buf_script, "progress=%s\n",
1684 is_last_report ? "end" : "continue");
1685 avio_write(progress_avio, buf_script.str,
1686 FFMIN(buf_script.len, buf_script.size - 1));
1687 avio_flush(progress_avio);
1688 av_bprint_finalize(&buf_script, NULL);
1689 if (is_last_report) {
1690 avio_closep(&progress_avio);
1695 print_final_stats(total_size);
/**
 * Drain every encoder at end of stream: repeatedly call the encode
 * function with a NULL frame until it stops producing packets, writing
 * each flushed packet to its muxer. Streams that are stream-copied,
 * raw-audio or rawvideo/RAWPICTURE are skipped (nothing buffered).
 */
1698 static void flush_encoders(void)
1702 for (i = 0; i < nb_output_streams; i++) {
1703 OutputStream *ost = output_streams[i];
1704 AVCodecContext *enc = ost->enc_ctx;
1705 AVFormatContext *os = output_files[ost->file_index]->ctx;
1706 int stop_encoding = 0;
1708 if (!ost->encoding_needed)
/* encoders without internal delay have nothing to flush */
1711 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1713 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1717 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1720 switch (enc->codec_type) {
1721 case AVMEDIA_TYPE_AUDIO:
1722 encode = avcodec_encode_audio2;
1725 case AVMEDIA_TYPE_VIDEO:
1726 encode = avcodec_encode_video2;
1737 av_init_packet(&pkt);
/* NULL frame signals the encoder to emit its buffered packets */
1741 update_benchmark(NULL);
1742 ret = encode(enc, &pkt, NULL, &got_packet);
1743 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1745 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1750 if (ost->logfile && enc->stats_out) {
1751 fprintf(ost->logfile, "%s", enc->stats_out);
1757 if (ost->finished & MUXER_FINISHED) {
1758 av_free_packet(&pkt);
1761 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1762 pkt_size = pkt.size;
1763 write_frame(os, &pkt, ost);
1764 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1765 do_video_stats(ost, pkt_size);
1776 * Check whether a packet from ist should be written into ost at this time
/* Returns non-zero only when ost stream-copies from exactly this input
 * stream and the packet is not before the output file's start time.
 * NOTE(review): the return statements are elided in this extract. */
1778 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1780 OutputFile *of = output_files[ost->file_index];
1781 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1783 if (ost->source_index != ist_index)
1789 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/**
 * Copy one input packet to an output stream without re-encoding:
 * rescale its timestamps into the output stream time base (offset by
 * the output start time), honor -copy_initial_nonkeyframes /
 * -copyts-style start handling and recording-time limits, optionally
 * run the parser-change/AVPicture fix-ups, then hand it to write_frame().
 */
1795 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1797 OutputFile *of = output_files[ost->file_index];
1798 InputFile *f = input_files [ist->file_index];
1799 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1800 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1801 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1805 av_init_packet(&opkt);
/* skip leading non-keyframes unless explicitly requested */
1807 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1808 !ost->copy_initial_nonkeyframes)
1811 if (pkt->pts == AV_NOPTS_VALUE) {
1812 if (!ost->frame_number && ist->pts < start_time &&
1813 !ost->copy_prior_start)
1816 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1817 !ost->copy_prior_start)
/* stop the stream once the requested recording time is reached */
1821 if (of->recording_time != INT64_MAX &&
1822 ist->pts >= of->recording_time + start_time) {
1823 close_output_stream(ost);
1827 if (f->recording_time != INT64_MAX) {
1828 start_time = f->ctx->start_time;
1829 if (f->start_time != AV_NOPTS_VALUE)
1830 start_time += f->start_time;
1831 if (ist->pts >= f->recording_time + start_time) {
1832 close_output_stream(ost);
1837 /* force the input stream PTS */
1838 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1841 if (pkt->pts != AV_NOPTS_VALUE)
1842 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1844 opkt.pts = AV_NOPTS_VALUE;
1846 if (pkt->dts == AV_NOPTS_VALUE)
1847 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1849 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1850 opkt.dts -= ost_tb_start_time;
/* audio: rescale via sample count to avoid rounding drift across packets */
1852 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1853 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1855 duration = ist->dec_ctx->frame_size;
1856 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1857 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1858 ost->st->time_base) - ost_tb_start_time;
1861 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1862 opkt.flags = pkt->flags;
1863 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1864 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1865 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1866 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1867 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1869 int ret = av_parser_change(ost->parser, ost->st->codec,
1870 &opkt.data, &opkt.size,
1871 pkt->data, pkt->size,
1872 pkt->flags & AV_PKT_FLAG_KEY);
1874 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser may have allocated new data; wrap it so opkt owns the buffer */
1879 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1884 opkt.data = pkt->data;
1885 opkt.size = pkt->size;
1887 av_copy_packet_side_data(&opkt, pkt);
1889 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1890 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1891 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1892 /* store AVPicture in AVPacket, as expected by the output format */
1893 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1895 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1899 opkt.data = (uint8_t *)&pict;
1900 opkt.size = sizeof(AVPicture);
1901 opkt.flags |= AV_PKT_FLAG_KEY;
1904 write_frame(of->ctx, &opkt, ost);
/**
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and log the guess.
 * NOTE(review): the return statements are elided in this extract;
 * callers treat 0 as "no layout could be determined".
 */
1907 int guess_input_channel_layout(InputStream *ist)
1909 AVCodecContext *dec = ist->dec_ctx;
1911 if (!dec->channel_layout) {
1912 char layout_name[256];
1914 if (dec->channels > ist->guess_layout_max)
1916 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1917 if (!dec->channel_layout)
1919 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1920 dec->channels, dec->channel_layout);
1921 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1922 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/**
 * Decode one audio packet, update the stream clocks, reconfigure the
 * filtergraphs when the sample format/rate/channel layout changes, fix
 * up the frame pts from the best available source, and push the decoded
 * frame into every attached filter input.
 *
 * @param got_output set non-zero when a frame was produced
 * @return decoder/filter error code, or the decode result
 */
1927 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1929 AVFrame *decoded_frame, *f;
1930 AVCodecContext *avctx = ist->dec_ctx;
1931 int i, ret, err = 0, resample_changed;
1932 AVRational decoded_frame_tb;
/* lazily allocate the frames reused across calls */
1934 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1935 return AVERROR(ENOMEM);
1936 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1937 return AVERROR(ENOMEM);
1938 decoded_frame = ist->decoded_frame;
1940 update_benchmark(NULL);
1941 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1942 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1944 if (ret >= 0 && avctx->sample_rate <= 0) {
1945 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1946 ret = AVERROR_INVALIDDATA;
1949 if (*got_output || ret<0)
1950 decode_error_stat[ret<0] ++;
1952 if (ret < 0 && exit_on_error)
1955 if (!*got_output || ret < 0)
1958 ist->samples_decoded += decoded_frame->nb_samples;
1959 ist->frames_decoded++;
1962 /* increment next_dts to use for the case where the input stream does not
1963 have timestamps or there are multiple frames in the packet */
1964 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1966 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in audio parameters that requires filter reinit */
1970 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1971 ist->resample_channels != avctx->channels ||
1972 ist->resample_channel_layout != decoded_frame->channel_layout ||
1973 ist->resample_sample_rate != decoded_frame->sample_rate;
1974 if (resample_changed) {
1975 char layout1[64], layout2[64];
1977 if (!guess_input_channel_layout(ist)) {
1978 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1979 "layout for Input Stream #%d.%d\n", ist->file_index,
1983 decoded_frame->channel_layout = avctx->channel_layout;
1985 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1986 ist->resample_channel_layout);
1987 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1988 decoded_frame->channel_layout);
1990 av_log(NULL, AV_LOG_INFO,
1991 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1992 ist->file_index, ist->st->index,
1993 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1994 ist->resample_channels, layout1,
1995 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1996 avctx->channels, layout2);
1998 ist->resample_sample_fmt = decoded_frame->format;
1999 ist->resample_sample_rate = decoded_frame->sample_rate;
2000 ist->resample_channel_layout = decoded_frame->channel_layout;
2001 ist->resample_channels = avctx->channels;
2003 for (i = 0; i < nb_filtergraphs; i++)
2004 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2005 FilterGraph *fg = filtergraphs[i];
2006 if (configure_filtergraph(fg) < 0) {
2007 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2013 /* if the decoder provides a pts, use it instead of the last packet pts.
2014 the decoder could be delaying output by a packet or more. */
2015 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2016 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2017 decoded_frame_tb = avctx->time_base;
2018 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2019 decoded_frame->pts = decoded_frame->pkt_pts;
2020 decoded_frame_tb = ist->st->time_base;
2021 } else if (pkt->pts != AV_NOPTS_VALUE) {
2022 decoded_frame->pts = pkt->pts;
2023 decoded_frame_tb = ist->st->time_base;
2025 decoded_frame->pts = ist->dts;
2026 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so a second frame from it gets no stale pts */
2028 pkt->pts = AV_NOPTS_VALUE;
2029 if (decoded_frame->pts != AV_NOPTS_VALUE)
2030 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2031 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2032 (AVRational){1, avctx->sample_rate});
/* fan out to every filter input; all but the last get a ref'd copy */
2033 for (i = 0; i < ist->nb_filters; i++) {
2034 if (i < ist->nb_filters - 1) {
2035 f = ist->filter_frame;
2036 err = av_frame_ref(f, decoded_frame);
2041 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2042 AV_BUFFERSRC_FLAG_PUSH);
2043 if (err == AVERROR_EOF)
2044 err = 0; /* ignore */
2048 decoded_frame->pts = AV_NOPTS_VALUE;
2050 av_frame_unref(ist->filter_frame);
2051 av_frame_unref(decoded_frame);
2052 return err < 0 ? err : ret;
/**
 * Decode one video packet, derive the best-effort timestamp, retrieve
 * hwaccel frames back to system memory when needed, reconfigure the
 * filtergraphs on size/pixel-format changes, and push the decoded frame
 * into every attached filter input.
 *
 * @param got_output set non-zero when a frame was produced
 * @return decoder/filter error code, or the decode result
 */
2055 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2057 AVFrame *decoded_frame, *f;
2058 int i, ret = 0, err = 0, resample_changed;
2059 int64_t best_effort_timestamp;
2060 AVRational *frame_sample_aspect;
2062 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2063 return AVERROR(ENOMEM);
2064 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2065 return AVERROR(ENOMEM);
2066 decoded_frame = ist->decoded_frame;
2067 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2069 update_benchmark(NULL);
2070 ret = avcodec_decode_video2(ist->dec_ctx,
2071 decoded_frame, got_output, pkt);
2072 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2074 // The following line may be required in some cases where there is no parser
2075 // or the parser does not has_b_frames correctly
2076 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2077 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2078 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2080 av_log(ist->dec_ctx, AV_LOG_WARNING,
2081 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2082 "If you want to help, upload a sample "
2083 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2084 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2085 ist->dec_ctx->has_b_frames,
2086 ist->st->codec->has_b_frames);
2089 if (*got_output || ret<0)
2090 decode_error_stat[ret<0] ++;
2092 if (ret < 0 && exit_on_error)
2095 if (*got_output && ret >= 0) {
2096 if (ist->dec_ctx->width != decoded_frame->width ||
2097 ist->dec_ctx->height != decoded_frame->height ||
2098 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2099 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2100 decoded_frame->width,
2101 decoded_frame->height,
2102 decoded_frame->format,
2103 ist->dec_ctx->width,
2104 ist->dec_ctx->height,
2105 ist->dec_ctx->pix_fmt);
2109 if (!*got_output || ret < 0)
2112 if(ist->top_field_first>=0)
2113 decoded_frame->top_field_first = ist->top_field_first;
2115 ist->frames_decoded++;
/* copy hwaccel surfaces back to system memory before filtering */
2117 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2118 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2122 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2124 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2125 if(best_effort_timestamp != AV_NOPTS_VALUE)
2126 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2129 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2130 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2131 ist->st->index, av_ts2str(decoded_frame->pts),
2132 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2133 best_effort_timestamp,
2134 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2135 decoded_frame->key_frame, decoded_frame->pict_type,
2136 ist->st->time_base.num, ist->st->time_base.den);
2141 if (ist->st->sample_aspect_ratio.num)
2142 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect any geometry/pixel-format change that requires filter reinit */
2144 resample_changed = ist->resample_width != decoded_frame->width ||
2145 ist->resample_height != decoded_frame->height ||
2146 ist->resample_pix_fmt != decoded_frame->format;
2147 if (resample_changed) {
2148 av_log(NULL, AV_LOG_INFO,
2149 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2150 ist->file_index, ist->st->index,
2151 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2152 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2154 ist->resample_width = decoded_frame->width;
2155 ist->resample_height = decoded_frame->height;
2156 ist->resample_pix_fmt = decoded_frame->format;
2158 for (i = 0; i < nb_filtergraphs; i++) {
2159 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2160 configure_filtergraph(filtergraphs[i]) < 0) {
2161 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2167 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* fan out to every filter input; all but the last get a ref'd copy */
2168 for (i = 0; i < ist->nb_filters; i++) {
2169 if (!frame_sample_aspect->num)
2170 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2172 if (i < ist->nb_filters - 1) {
2173 f = ist->filter_frame;
2174 err = av_frame_ref(f, decoded_frame);
2179 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2180 if (ret == AVERROR_EOF) {
2181 ret = 0; /* ignore */
2182 } else if (ret < 0) {
2183 av_log(NULL, AV_LOG_FATAL,
2184 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2190 av_frame_unref(ist->filter_frame);
2191 av_frame_unref(decoded_frame);
2192 return err < 0 ? err : ret;
/**
 * Decode one subtitle packet, optionally fix overlapping display
 * durations (-fix_sub_duration, using a one-event lookahead held in
 * ist->prev_sub), feed the sub2video path, and encode the subtitle on
 * every matching output stream.
 *
 * @param got_output set non-zero when a subtitle event was produced
 */
2195 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2197 AVSubtitle subtitle;
2198 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2199 &subtitle, got_output, pkt);
2201 if (*got_output || ret<0)
2202 decode_error_stat[ret<0] ++;
2204 if (ret < 0 && exit_on_error)
2207 if (ret < 0 || !*got_output) {
2209 sub2video_flush(ist);
/* clamp the previous event's duration so it ends when this one starts */
2213 if (ist->fix_sub_duration) {
2215 if (ist->prev_sub.got_output) {
2216 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2217 1000, AV_TIME_BASE);
2218 if (end < ist->prev_sub.subtitle.end_display_time) {
2219 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2220 "Subtitle duration reduced from %d to %d%s\n",
2221 ist->prev_sub.subtitle.end_display_time, end,
2222 end <= 0 ? ", dropping it" : "");
2223 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous: output is delayed by one event */
2226 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2227 FFSWAP(int, ret, ist->prev_sub.ret);
2228 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2236 sub2video_update(ist, &subtitle);
2238 if (!subtitle.num_rects)
2241 ist->frames_decoded++;
2243 for (i = 0; i < nb_output_streams; i++) {
2244 OutputStream *ost = output_streams[i];
2246 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2247 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2250 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2254 avsubtitle_free(&subtitle);
/**
 * Signal EOF to every filter input attached to this input stream by
 * pushing a NULL frame into each buffersrc.
 * NOTE(review): the declarations of i/ret and the return statements
 * are elided in this extract.
 */
2258 static int send_filter_eof(InputStream *ist)
2261 for (i = 0; i < ist->nb_filters; i++) {
2262 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2269 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet dispatch: maintains the input stream dts/pts clocks,
 * loops the decoder over the packet (or flushes it on EOF), sends
 * filter EOF after the last decoded frame, advances the clocks for
 * stream-copied packets, and forwards them to matching outputs. */
2270 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2276 if (!ist->saw_first_ts) {
/* seed dts with the decoder's B-frame delay so the first output dts is sane */
2277 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2279 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2280 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2281 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2283 ist->saw_first_ts = 1;
2286 if (ist->next_dts == AV_NOPTS_VALUE)
2287 ist->next_dts = ist->dts;
2288 if (ist->next_pts == AV_NOPTS_VALUE)
2289 ist->next_pts = ist->pts;
2293 av_init_packet(&avpkt);
2301 if (pkt->dts != AV_NOPTS_VALUE) {
2302 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2303 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2304 ist->next_pts = ist->pts = ist->dts;
2307 // while we have more to decode or while the decoder did output something on EOF
2308 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2312 ist->pts = ist->next_pts;
2313 ist->dts = ist->next_dts;
2315 if (avpkt.size && avpkt.size != pkt->size &&
2316 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2317 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2318 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2319 ist->showed_multi_packet_warning = 1;
2322 switch (ist->dec_ctx->codec_type) {
2323 case AVMEDIA_TYPE_AUDIO:
2324 ret = decode_audio (ist, &avpkt, &got_output);
2326 case AVMEDIA_TYPE_VIDEO:
2327 ret = decode_video (ist, &avpkt, &got_output);
/* derive the frame duration: packet duration, else codec framerate */
2328 if (avpkt.duration) {
2329 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2330 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2331 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2332 duration = ((int64_t)AV_TIME_BASE *
2333 ist->dec_ctx->framerate.den * ticks) /
2334 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2338 if(ist->dts != AV_NOPTS_VALUE && duration) {
2339 ist->next_dts += duration;
2341 ist->next_dts = AV_NOPTS_VALUE;
2344 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2346 case AVMEDIA_TYPE_SUBTITLE:
2347 ret = transcode_subtitles(ist, &avpkt, &got_output);
2354 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2355 ist->file_index, ist->st->index, av_err2str(ret));
2362 avpkt.pts= AV_NOPTS_VALUE;
2364 // touch data and size only if not EOF
2366 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2374 if (got_output && !pkt)
2378 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2379 if (!pkt && ist->decoding_needed && !got_output) {
2380 int ret = send_filter_eof(ist);
2382 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2387 /* handle stream copy */
2388 if (!ist->decoding_needed) {
2389 ist->dts = ist->next_dts;
2390 switch (ist->dec_ctx->codec_type) {
2391 case AVMEDIA_TYPE_AUDIO:
2392 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2393 ist->dec_ctx->sample_rate;
2395 case AVMEDIA_TYPE_VIDEO:
2396 if (ist->framerate.num) {
2397 // TODO: Remove work-around for c99-to-c89 issue 7
2398 AVRational time_base_q = AV_TIME_BASE_Q;
2399 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2400 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2401 } else if (pkt->duration) {
2402 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2403 } else if(ist->dec_ctx->framerate.num != 0) {
2404 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2405 ist->next_dts += ((int64_t)AV_TIME_BASE *
2406 ist->dec_ctx->framerate.den * ticks) /
2407 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2411 ist->pts = ist->dts;
2412 ist->next_pts = ist->next_dts;
2414 for (i = 0; pkt && i < nb_output_streams; i++) {
2415 OutputStream *ost = output_streams[i];
2417 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2420 do_streamcopy(ist, ost, pkt);
2426 static void print_sdp(void)
/* Build an SDP session description covering every RTP output muxer and
 * emit it: to stdout by default, or to the file named by the global
 * sdp_filename when -sdp_file was given on the command line.
 * NOTE(review): this listing elides several lines here (the sdp[]/i/j
 * declarations, allocation-failure handling and loop closers) — confirm
 * against the full source before editing. */
2431 AVIOContext *sdp_pb;
2432 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Gather only the outputs whose muxer is "rtp"; j counts how many. */
2436 for (i = 0, j = 0; i < nb_output_files; i++) {
2437 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2438 avc[j] = output_files[i]->ctx;
2443 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file given: dump the SDP text to stdout. */
2445 if (!sdp_filename) {
2446 printf("SDP:\n%s\n", sdp);
/* Otherwise write it to the requested file via avio. */
2449 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2450 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2452 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2453 avio_closep(&sdp_pb);
/* The filename was strdup'ed at option-parsing time; release it now. */
2454 av_freep(&sdp_filename);
2461 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2464 for (i = 0; hwaccels[i].name; i++)
2465 if (hwaccels[i].pix_fmt == pix_fmt)
2466 return &hwaccels[i];
/* Decoder get_format callback: walk the decoder's proposed pixel-format
 * list and initialize the first hwaccel we can use for it; remembers the
 * chosen hwaccel id and pixel format on the InputStream.
 * NOTE(review): the listing elides the `continue;`/`break;` lines and the
 * final software-format fallthrough — verify control flow in full source. */
2470 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2472 InputStream *ist = s->opaque;
2473 const enum AVPixelFormat *p;
/* The list is terminated by AV_PIX_FMT_NONE (-1). */
2476 for (p = pix_fmts; *p != -1; p++) {
2477 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2478 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are of interest here. */
2480 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2483 hwaccel = get_hwaccel(*p);
/* Skip formats whose hwaccel conflicts with an already-active one or
 * with the hwaccel explicitly requested by the user. */
2485 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2486 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2489 ret = hwaccel->init(s);
/* Init failure is fatal only if this hwaccel was explicitly requested. */
2491 if (ist->hwaccel_id == hwaccel->id) {
2492 av_log(NULL, AV_LOG_FATAL,
2493 "%s hwaccel requested for input stream #%d:%d, "
2494 "but cannot be initialized.\n", hwaccel->name,
2495 ist->file_index, ist->st->index);
2496 return AV_PIX_FMT_NONE;
/* Success: record which hwaccel/pix_fmt is now in use for this stream. */
2500 ist->active_hwaccel_id = hwaccel->id;
2501 ist->hwaccel_pix_fmt = *p;
2508 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2510 InputStream *ist = s->opaque;
2512 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2513 return ist->hwaccel_get_buffer(s, frame, flags);
2515 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder (if decoding is needed) for input stream ist_index and
 * install the ffmpeg.c callbacks (get_format/get_buffer2) on its codec
 * context. On failure a human-readable message is written into error
 * (error_len bytes) and a negative AVERROR code is returned.
 * NOTE(review): listing elides the missing-decoder branch header and the
 * trailing return — confirm against full source. */
2518 static int init_input_stream(int ist_index, char *error, int error_len)
2521 InputStream *ist = input_streams[ist_index];
2523 if (ist->decoding_needed) {
2524 AVCodec *codec = ist->dec;
/* No decoder was found for this stream's codec id. */
2526 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2527 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2528 return AVERROR(EINVAL);
/* Hook our callbacks into the decoder; opaque carries the InputStream. */
2531 ist->dec_ctx->opaque = ist;
2532 ist->dec_ctx->get_format = get_format;
2533 ist->dec_ctx->get_buffer2 = get_buffer;
2534 ist->dec_ctx->thread_safe_callbacks = 1;
/* Decoded frames are reference-counted so filters can hold on to them. */
2536 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2537 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2538 (ist->decoding_needed & DECODING_FOR_OST)) {
2539 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2540 if (ist->decoding_needed & DECODING_FOR_FILTER)
2541 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic threading unless the user chose a thread count. */
2544 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2545 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2546 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2547 if (ret == AVERROR_EXPERIMENTAL)
2548 abort_codec_experimental(codec, 0);
2550 snprintf(error, error_len,
2551 "Error while opening decoder for input stream "
2553 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left unconsumed by avcodec_open2 is a user error. */
2556 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2559 ist->next_pts = AV_NOPTS_VALUE;
2560 ist->next_dts = AV_NOPTS_VALUE;
2565 static InputStream *get_input_stream(OutputStream *ost)
2567 if (ost->source_index >= 0)
2568 return input_streams[ost->source_index];
/*
 * qsort(3) comparator for int64_t values, ascending.
 * Returns -1, 0 or +1 rather than a raw difference, so it cannot
 * overflow for any pair of 64-bit inputs.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    if (lhs > rhs)
        return 1;
    return 0;
}
/* Open the encoder (if encoding is needed) for one output stream, copy
 * subtitle headers from the source decoder, pick sane defaults for
 * threads and audio bitrate, and mirror the resulting codec parameters
 * into the AVStream. Writes a message into error/error_len and returns a
 * negative AVERROR code on failure.
 * NOTE(review): listing elides several branch headers and returns
 * (missing-encoder path, error returns after avcodec_open2) — confirm
 * against full source. */
2578 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2582 if (ost->encoding_needed) {
2583 AVCodec *codec = ost->enc;
2584 AVCodecContext *dec = NULL;
2587 if ((ist = get_input_stream(ost)))
2589 if (dec && dec->subtitle_header) {
2590 /* ASS code assumes this buffer is null terminated so add extra byte. */
2591 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2592 if (!ost->enc_ctx->subtitle_header)
2593 return AVERROR(ENOMEM);
2594 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2595 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic threading unless the user set a thread count. */
2597 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2598 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when the user gave neither -b nor -ab. */
2599 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2601 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2602 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2603 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2605 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2606 if (ret == AVERROR_EXPERIMENTAL)
2607 abort_codec_experimental(codec, 1);
2608 snprintf(error, error_len,
2609 "Error while opening encoder for output stream #%d:%d - "
2610 "maybe incorrect parameters such as bit_rate, rate, width or height",
2611 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly frame_size samples. */
2614 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2615 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2616 av_buffersink_set_frame_size(ost->filter->filter,
2617 ost->enc_ctx->frame_size);
2618 assert_avoptions(ost->encoder_opts);
2619 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2620 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2621 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the opened encoder context into the stream's codec context so
 * the muxer sees the final parameters. */
2623 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2625 av_log(NULL, AV_LOG_FATAL,
2626 "Error initializing the output stream codec context.\n");
2630 // copy timebase while removing common factors
2631 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2632 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path (presumably — TODO confirm): apply user codec options
 * and propagate the timebase. */
2634 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2636 av_log(NULL, AV_LOG_FATAL,
2637 "Error setting up codec context options.\n");
2640 // copy timebase while removing common factors
2641 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse the -force_key_frames option string kf (comma-separated times,
 * with the special "chapters[+offset]" form expanding to all chapter
 * start times) into a sorted array of pts values in avctx->time_base,
 * stored on ost (forced_kf_pts / forced_kf_count).
 * NOTE(review): listing elides the comma-counting loop body, the
 * allocation-failure exits and some loop closers — confirm in full source. */
2647 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2648 AVCodecContext *avctx)
2651 int n = 1, i, size, index = 0;
/* Count entries: one more than the number of commas in kf. */
2654 for (p = kf; *p; p++)
2658 pts = av_malloc_array(size, sizeof(*pts));
2660 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2665 for (i = 0; i < n; i++) {
2666 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start. */
2671 if (!memcmp(p, "chapters", 8)) {
2673 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by one slot per chapter (the "chapters" entry itself
 * already accounts for one), guarding against int overflow. */
2676 if (avf->nb_chapters > INT_MAX - size ||
2677 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2679 av_log(NULL, AV_LOG_FATAL,
2680 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", applied to every chapter time. */
2683 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2684 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2686 for (j = 0; j < avf->nb_chapters; j++) {
2687 AVChapter *c = avf->chapters[j];
2688 av_assert1(index < size);
2689 pts[index++] = av_rescale_q(c->start, c->time_base,
2690 avctx->time_base) + t;
/* Plain timestamp entry. */
2695 t = parse_time_or_die("force_key_frames", p, 1);
2696 av_assert1(index < size);
2697 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder loop can scan it linearly. */
2704 av_assert0(index == size);
2705 qsort(pts, size, sizeof(*pts), compare_int64);
2706 ost->forced_kf_count = size;
2707 ost->forced_kf_pts = pts;
2710 static void report_new_stream(int input_index, AVPacket *pkt)
2712 InputFile *file = input_files[input_index];
2713 AVStream *st = file->ctx->streams[pkt->stream_index];
2715 if (pkt->stream_index < file->nb_streams_warn)
2717 av_log(file->ctx, AV_LOG_WARNING,
2718 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2719 av_get_media_type_string(st->codec->codec_type),
2720 input_index, pkt->stream_index,
2721 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2722 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on the output stream to identify the
 * encoder used ("Lavc<version> <name>", or just "Lavc <name>" in bitexact
 * mode). Respects an encoder tag already set by the user.
 * NOTE(review): listing elides the early returns and the `if (e)` branch
 * headers around the option lookups — confirm in full source. */
2725 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2727 AVDictionaryEntry *e;
2729 uint8_t *encoder_string;
2730 int encoder_string_len;
2731 int format_flags = 0;
2732 int codec_flags = 0;
/* Do not clobber a user-provided encoder tag. */
2734 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate muxer-level fflags to detect +bitexact. */
2737 e = av_dict_get(of->opts, "fflags", NULL, 0);
2739 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2742 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Evaluate codec-level flags to detect +bitexact there too. */
2744 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2746 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2749 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* Room for "LIBAVCODEC_IDENT <name>" plus separator and NUL. */
2752 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2753 encoder_string = av_mallocz(encoder_string_len);
2754 if (!encoder_string)
/* In bitexact mode omit the version string so output is reproducible. */
2757 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2758 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2760 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2761 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
2762 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2763 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the main transcode loop: resolve complex-filter
 * output sources, compute encoding/stream-copy parameters for every
 * output stream, open all encoders and decoders, write all output file
 * headers, and print the stream mapping. Returns 0 on success or a
 * negative AVERROR code.
 * NOTE(review): this listing elides a large number of lines throughout
 * (branch headers, loop closers, gotos, error paths) — any edit must be
 * made against the full source. */
2766 static int transcode_init(void)
2768 int ret = 0, i, j, k;
2769 AVFormatContext *oc;
2772 char error[1024] = {0};
/* Attach each complex-filtergraph output to the input stream feeding its
 * (single-input) graph, so mapping/reporting has a source index. */
2775 for (i = 0; i < nb_filtergraphs; i++) {
2776 FilterGraph *fg = filtergraphs[i];
2777 for (j = 0; j < fg->nb_outputs; j++) {
2778 OutputFilter *ofilter = fg->outputs[j];
2779 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2781 if (fg->nb_inputs != 1)
2783 for (k = nb_input_streams-1; k >= 0 ; k--)
2784 if (fg->inputs[0]->ist == input_streams[k])
2786 ofilter->ost->source_index = k;
2790 /* init framerate emulation */
2791 for (i = 0; i < nb_input_files; i++) {
2792 InputFile *ifile = input_files[i];
2793 if (ifile->rate_emu)
2794 for (j = 0; j < ifile->nb_streams; j++)
2795 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2798 /* for each output stream, we compute the right encoding parameters */
2799 for (i = 0; i < nb_output_streams; i++) {
2800 AVCodecContext *enc_ctx;
2801 AVCodecContext *dec_ctx = NULL;
2802 ost = output_streams[i];
2803 oc = output_files[ost->file_index]->ctx;
2804 ist = get_input_stream(ost);
2806 if (ost->attachment_filename)
/* Stream copy fills the stream's codec context directly; encoding uses
 * the separate encoder context. */
2809 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2812 dec_ctx = ist->dec_ctx;
2814 ost->st->disposition = ist->st->disposition;
2815 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2816 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output file, mark
 * it as the default one. */
2818 for (j=0; j<oc->nb_streams; j++) {
2819 AVStream *st = oc->streams[j];
2820 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2823 if (j == oc->nb_streams)
2824 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2825 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2828 if (ost->stream_copy) {
2830 uint64_t extra_size;
2832 av_assert0(ist && !ost->filter);
2834 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2836 if (extra_size > INT_MAX) {
2837 return AVERROR(EINVAL);
2840 /* if stream_copy is selected, no need to decode or encode */
2841 enc_ctx->codec_id = dec_ctx->codec_id;
2842 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container can accept it. */
2844 if (!enc_ctx->codec_tag) {
2845 unsigned int codec_tag;
2846 if (!oc->oformat->codec_tag ||
2847 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2848 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2849 enc_ctx->codec_tag = dec_ctx->codec_tag;
2852 enc_ctx->bit_rate = dec_ctx->bit_rate;
2853 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2854 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2855 enc_ctx->field_order = dec_ctx->field_order;
/* Copy extradata (codec headers) with the required zeroed padding. */
2856 if (dec_ctx->extradata_size) {
2857 enc_ctx->extradata = av_mallocz(extra_size);
2858 if (!enc_ctx->extradata) {
2859 return AVERROR(ENOMEM);
2861 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2863 enc_ctx->extradata_size= dec_ctx->extradata_size;
2864 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2866 enc_ctx->time_base = ist->st->time_base;
2868 * Avi is a special case here because it supports variable fps but
2869 * having the fps and timebase differe significantly adds quite some
2872 if(!strcmp(oc->oformat->name, "avi")) {
2873 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2874 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2875 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2876 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2878 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2879 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2880 enc_ctx->ticks_per_frame = 2;
2881 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2882 && av_q2d(ist->st->time_base) < 1.0/500
2884 enc_ctx->time_base = dec_ctx->time_base;
2885 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2886 enc_ctx->time_base.den *= 2;
2887 enc_ctx->ticks_per_frame = 2;
/* Non-variable-fps containers (except the mov/mp4 family) may prefer the
 * codec timebase over the (possibly too coarse) stream timebase. */
2889 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2890 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2891 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2892 && strcmp(oc->oformat->name, "f4v")
2894 if( copy_tb<0 && dec_ctx->time_base.den
2895 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2896 && av_q2d(ist->st->time_base) < 1.0/500
2898 enc_ctx->time_base = dec_ctx->time_base;
2899 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode ("tmcd") tracks keep the codec timebase when it looks like a
 * plausible frame rate (between ~1 and 121 fps). */
2902 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2903 && dec_ctx->time_base.num < dec_ctx->time_base.den
2904 && dec_ctx->time_base.num > 0
2905 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2906 enc_ctx->time_base = dec_ctx->time_base;
2909 if (!ost->frame_rate.num)
2910 ost->frame_rate = ist->framerate;
2911 if(ost->frame_rate.num)
2912 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2914 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2915 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream-level side data, except a display matrix the user
 * explicitly overrode with -metadata rotate / autorotate handling. */
2917 if (ist->st->nb_side_data) {
2918 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2919 sizeof(*ist->st->side_data));
2920 if (!ost->st->side_data)
2921 return AVERROR(ENOMEM);
2923 ost->st->nb_side_data = 0;
2924 for (j = 0; j < ist->st->nb_side_data; j++) {
2925 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2926 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2928 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2931 sd_dst->data = av_malloc(sd_src->size);
2933 return AVERROR(ENOMEM);
2934 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2935 sd_dst->size = sd_src->size;
2936 sd_dst->type = sd_src->type;
2937 ost->st->nb_side_data++;
2941 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copy for stream copy. */
2943 switch (enc_ctx->codec_type) {
2944 case AVMEDIA_TYPE_AUDIO:
2945 if (audio_volume != 256) {
2946 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2949 enc_ctx->channel_layout = dec_ctx->channel_layout;
2950 enc_ctx->sample_rate = dec_ctx->sample_rate;
2951 enc_ctx->channels = dec_ctx->channels;
2952 enc_ctx->frame_size = dec_ctx->frame_size;
2953 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2954 enc_ctx->block_align = dec_ctx->block_align;
2955 enc_ctx->initial_padding = dec_ctx->delay;
2956 #if FF_API_AUDIOENC_DELAY
2957 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3 muxing does not want a fixed block_align. */
2959 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2960 enc_ctx->block_align= 0;
2961 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2962 enc_ctx->block_align= 0;
2964 case AVMEDIA_TYPE_VIDEO:
2965 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2966 enc_ctx->width = dec_ctx->width;
2967 enc_ctx->height = dec_ctx->height;
2968 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2969 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2971 av_mul_q(ost->frame_aspect_ratio,
2972 (AVRational){ enc_ctx->height, enc_ctx->width });
2973 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2974 "with stream copy may produce invalid files\n");
2976 else if (ist->st->sample_aspect_ratio.num)
2977 sar = ist->st->sample_aspect_ratio;
2979 sar = dec_ctx->sample_aspect_ratio;
2980 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2981 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2982 ost->st->r_frame_rate = ist->st->r_frame_rate;
2984 case AVMEDIA_TYPE_SUBTITLE:
2985 enc_ctx->width = dec_ctx->width;
2986 enc_ctx->height = dec_ctx->height;
2988 case AVMEDIA_TYPE_UNKNOWN:
2989 case AVMEDIA_TYPE_DATA:
2990 case AVMEDIA_TYPE_ATTACHMENT:
/* Encoding path: find the encoder, build the (simple) filtergraph and
 * derive encoder parameters from the filter output. */
2997 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2999 /* should only happen when a default codec is not present. */
3000 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3001 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3002 ret = AVERROR(EINVAL);
3006 set_encoder_id(output_files[ost->file_index], ost);
3009 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3010 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3012 fg = init_simple_filtergraph(ist, ost);
3013 if (configure_filtergraph(fg)) {
3014 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: user -r, filter graph, input metadata,
 * finally a 25 fps fallback. */
3019 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3020 if (!ost->frame_rate.num)
3021 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3022 if (ist && !ost->frame_rate.num)
3023 ost->frame_rate = ist->framerate;
3024 if (ist && !ost->frame_rate.num)
3025 ost->frame_rate = ist->st->r_frame_rate;
3026 if (ist && !ost->frame_rate.num) {
3027 ost->frame_rate = (AVRational){25, 1};
3028 av_log(NULL, AV_LOG_WARNING,
3030 "about the input framerate is available. Falling "
3031 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3032 "if you want a different framerate.\n",
3033 ost->file_index, ost->index);
3035 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3036 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3037 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3038 ost->frame_rate = ost->enc->supported_framerates[idx];
3040 // reduce frame rate for mpeg4 to be within the spec limits
3041 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3042 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3043 ost->frame_rate.num, ost->frame_rate.den, 65535);
3047 switch (enc_ctx->codec_type) {
3048 case AVMEDIA_TYPE_AUDIO:
3049 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3050 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3051 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3052 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3053 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3055 case AVMEDIA_TYPE_VIDEO:
3056 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3057 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3058 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3059 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3060 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3061 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3062 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale any previously parsed forced keyframe times into the final
 * encoder timebase. */
3064 for (j = 0; j < ost->forced_kf_count; j++)
3065 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3067 enc_ctx->time_base);
3069 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3070 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3071 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3072 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3073 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3074 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3075 if (!strncmp(ost->enc->name, "libx264", 7) &&
3076 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3077 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3078 av_log(NULL, AV_LOG_WARNING,
3079 "No pixel format specified, %s for H.264 encoding chosen.\n"
3080 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3081 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3082 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3083 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3084 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3085 av_log(NULL, AV_LOG_WARNING,
3086 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3087 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3088 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3089 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3091 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep the user-requested raw bit depth only if the picture geometry and
 * format are unchanged from the input. */
3094 enc_ctx->width != dec_ctx->width ||
3095 enc_ctx->height != dec_ctx->height ||
3096 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3097 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3100 if (ost->forced_keyframes) {
3101 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3102 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3103 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3105 av_log(NULL, AV_LOG_ERROR,
3106 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3109 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3110 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3111 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3112 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3114 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3115 // parse it only for static kf timings
3116 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3117 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3121 case AVMEDIA_TYPE_SUBTITLE:
3122 enc_ctx->time_base = (AVRational){1, 1000};
3123 if (!enc_ctx->width) {
3124 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3125 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3128 case AVMEDIA_TYPE_DATA:
/* Apply the user's -disposition string via a local flags option table. */
3136 if (ost->disposition) {
3137 static const AVOption opts[] = {
3138 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3139 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3140 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3141 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3142 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3143 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3144 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3145 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3146 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3147 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3148 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3149 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3150 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3151 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3154 static const AVClass class = {
3156 .item_name = av_default_item_name,
3158 .version = LIBAVUTIL_VERSION_INT,
3160 const AVClass *pclass = &class;
3162 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3168 /* open each encoder */
3169 for (i = 0; i < nb_output_streams; i++) {
3170 ret = init_output_stream(output_streams[i], error, sizeof(error));
3175 /* init input streams */
3176 for (i = 0; i < nb_input_streams; i++)
3177 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3178 for (i = 0; i < nb_output_streams; i++) {
3179 ost = output_streams[i];
3180 avcodec_close(ost->enc_ctx);
3185 /* discard unused programs */
3186 for (i = 0; i < nb_input_files; i++) {
3187 InputFile *ifile = input_files[i];
3188 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3189 AVProgram *p = ifile->ctx->programs[j];
3190 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is still in use. */
3192 for (k = 0; k < p->nb_stream_indexes; k++)
3193 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3194 discard = AVDISCARD_DEFAULT;
3197 p->discard = discard;
3201 /* open files and write file headers */
3202 for (i = 0; i < nb_output_files; i++) {
3203 oc = output_files[i]->ctx;
3204 oc->interrupt_callback = int_cb;
3205 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3206 snprintf(error, sizeof(error),
3207 "Could not write header for output file #%d "
3208 "(incorrect codec parameters ?): %s",
3209 i, av_err2str(ret));
3210 ret = AVERROR(EINVAL);
3213 // assert_avoptions(output_files[i]->opts);
3214 if (strcmp(oc->oformat->name, "rtp")) {
3220 /* dump the file output parameters - cannot be done before in case
3222 for (i = 0; i < nb_output_files; i++) {
3223 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3226 /* dump the stream mapping */
3227 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3228 for (i = 0; i < nb_input_streams; i++) {
3229 ist = input_streams[i];
3231 for (j = 0; j < ist->nb_filters; j++) {
3232 if (ist->filters[j]->graph->graph_desc) {
3233 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3234 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3235 ist->filters[j]->name);
3236 if (nb_filtergraphs > 1)
3237 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3238 av_log(NULL, AV_LOG_INFO, "\n");
3243 for (i = 0; i < nb_output_streams; i++) {
3244 ost = output_streams[i];
3246 if (ost->attachment_filename) {
3247 /* an attached file */
3248 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3249 ost->attachment_filename, ost->file_index, ost->index);
3253 if (ost->filter && ost->filter->graph->graph_desc) {
3254 /* output from a complex graph */
3255 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3256 if (nb_filtergraphs > 1)
3257 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3259 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3260 ost->index, ost->enc ? ost->enc->name : "?");
3264 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3265 input_streams[ost->source_index]->file_index,
3266 input_streams[ost->source_index]->st->index,
3269 if (ost->sync_ist != input_streams[ost->source_index])
3270 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3271 ost->sync_ist->file_index,
3272 ost->sync_ist->st->index);
3273 if (ost->stream_copy)
3274 av_log(NULL, AV_LOG_INFO, " (copy)");
3276 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3277 const AVCodec *out_codec = ost->enc;
3278 const char *decoder_name = "?";
3279 const char *in_codec_name = "?";
3280 const char *encoder_name = "?";
3281 const char *out_codec_name = "?";
3282 const AVCodecDescriptor *desc;
/* Report "native" when the decoder name equals the codec name. */
3285 decoder_name = in_codec->name;
3286 desc = avcodec_descriptor_get(in_codec->id);
3288 in_codec_name = desc->name;
3289 if (!strcmp(decoder_name, in_codec_name))
3290 decoder_name = "native";
3294 encoder_name = out_codec->name;
3295 desc = avcodec_descriptor_get(out_codec->id);
3297 out_codec_name = desc->name;
3298 if (!strcmp(encoder_name, out_codec_name))
3299 encoder_name = "native";
3302 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3303 in_codec_name, decoder_name,
3304 out_codec_name, encoder_name);
3306 av_log(NULL, AV_LOG_INFO, "\n");
3310 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3314 if (sdp_filename || want_sdp) {
3318 transcode_init_done = 1;
3323 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3324 static int need_output(void)
3328 for (i = 0; i < nb_output_streams; i++) {
3329 OutputStream *ost = output_streams[i];
3330 OutputFile *of = output_files[ost->file_index];
3331 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Streams that are finished or whose file hit -fs are not counted. */
3333 if (ost->finished ||
3334 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Hitting -frames closes every stream of the owning output file. */
3336 if (ost->frame_number >= ost->max_frames) {
3338 for (j = 0; j < of->ctx->nb_streams; j++)
3339 close_output_stream(output_streams[of->ost_index + j]);
3350 * Select the output stream to process.
3352 * @return selected output stream, or NULL if none available
3354 static OutputStream *choose_output(void)
3357 int64_t opts_min = INT64_MAX;
3358 OutputStream *ost_min = NULL;
/* Pick the unfinished stream with the smallest current dts (rescaled to
 * AV_TIME_BASE units) so output files advance roughly in lockstep. */
3360 for (i = 0; i < nb_output_streams; i++) {
3361 OutputStream *ost = output_streams[i];
3362 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
/* An "unavailable" stream still updates opts_min but yields NULL, so the
 * caller waits instead of processing another stream. */
3364 if (!ost->finished && opts < opts_min) {
3366 ost_min = ost->unavailable ? NULL : ost;
/* Poll the console (at most every 100ms, never in daemon mode) for a
 * single-key command: verbosity, QP histogram, packet dumping, filter
 * commands (c/C), decoder debug flags (d/D) and a help screen.
 * Returns AVERROR_EXIT when the user asked to quit or a signal arrived.
 * NOTE(review): listing elides the read_key() call, 'q' handling and
 * several branch headers — confirm against full source. */
3372 static int check_keyboard_interaction(int64_t cur_time)
3375 static int64_t last_time;
3376 if (received_nb_signals)
3377 return AVERROR_EXIT;
3378 /* read_key() returns 0 on EOF */
3379 if(cur_time - last_time >= 100000 && !run_as_daemon){
3381 last_time = cur_time;
3385 return AVERROR_EXIT;
3386 if (key == '+') av_log_set_level(av_log_get_level()+10);
3387 if (key == '-') av_log_set_level(av_log_get_level()-10);
3388 if (key == 's') qp_hist ^= 1;
/* 'h' cycles: off -> pkt dump -> pkt+payload hex dump -> off. */
3391 do_hex_dump = do_pkt_dump = 0;
3392 } else if(do_pkt_dump){
3396 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read an interactive filter command line from the console and
 * dispatch it to matching filtergraphs ('c' = first match only). */
3398 if (key == 'c' || key == 'C'){
3399 char buf[4096], target[64], command[256], arg[256] = {0};
3402 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3404 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3409 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3410 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3411 target, time, command, arg);
3412 for (i = 0; i < nb_filtergraphs; i++) {
3413 FilterGraph *fg = filtergraphs[i];
3416 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3417 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3418 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3419 } else if (key == 'c') {
3420 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3421 ret = AVERROR_PATCHWELCOME;
3423 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3425 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3430 av_log(NULL, AV_LOG_ERROR,
3431 "Parse error, at least 3 arguments were expected, "
3432 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through debug modes; 'D' prompts for an explicit value. */
3435 if (key == 'd' || key == 'D'){
3438 debug = input_streams[0]->st->codec->debug<<1;
3439 if(!debug) debug = 1;
3440 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3446 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3450 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3451 fprintf(stderr,"error parsing debug value\n");
/* Propagate the new debug flags to every open codec context. */
3453 for(i=0;i<nb_input_streams;i++) {
3454 input_streams[i]->st->codec->debug = debug;
3456 for(i=0;i<nb_output_streams;i++) {
3457 OutputStream *ost = output_streams[i];
3458 ost->enc_ctx->debug = debug;
3460 if(debug) av_log_set_level(AV_LOG_DEBUG);
3461 fprintf(stderr,"debug=%d\n", debug);
3464 fprintf(stderr, "key function\n"
3465 "? show this help\n"
3466 "+ increase verbosity\n"
3467 "- decrease verbosity\n"
3468 "c Send command to first matching filter supporting it\n"
3469 "C Send/Que command to all matching filters\n"
3470 "D cycle through available debug modes\n"
3471 "h dump packets/hex press to cycle through the 3 states\n"
3473 "s Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs are open):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through the file's thread message queue. */
3480 static void *input_thread(void *arg)
/* Non-seekable ("live") inputs send non-blocking so a full queue is
 * detected and warned about rather than silently stalling the demuxer. */
3483 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3488 ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer: back off and retry (retry code is on lines
 * omitted from this listing). */
3490 if (ret == AVERROR(EAGAIN)) {
/* Any other error (including EOF) is propagated to the receiving side. */
3495 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Deep-copy the packet so it survives after the demuxer reuses its buffers. */
3498 av_dup_packet(&pkt);
3499 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once, then fall back to a
 * blocking send so the packet is not dropped. */
3500 if (flags && ret == AVERROR(EAGAIN)) {
3502 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3503 av_log(f->ctx, AV_LOG_WARNING,
3504 "Thread message queue blocking; consider raising the "
3505 "thread_queue_size option (current value: %d)\n",
3506 f->thread_queue_size);
/* On send failure, free the packet and tell the receiver why we stopped
 * (EOF is the normal termination and is not logged as an error). */
3509 if (ret != AVERROR_EOF)
3510 av_log(f->ctx, AV_LOG_ERROR,
3511 "Unable to send packet to main thread: %s\n",
3513 av_free_packet(&pkt);
3514 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads, draining and freeing any packets
 * still queued, then free each file's thread message queue. */
3522 static void free_input_threads(void)
3526 for (i = 0; i < nb_input_files; i++) {
3527 InputFile *f = input_files[i];
/* Skip files with no thread/queue (single-input runs never start threads). */
3530 if (!f || !f->in_thread_queue)
/* Signal EOF to the sending side so the thread's next send fails and it exits. */
3532 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so their buffers are not leaked. */
3533 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3534 av_free_packet(&pkt);
3536 pthread_join(f->thread, NULL);
3538 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file.
 * Returns 0 on success or a negative AVERROR code. */
3542 static int init_input_threads(void)
/* With a single input, the main loop reads directly; no threads needed. */
3546 if (nb_input_files == 1)
3549 for (i = 0; i < nb_input_files; i++) {
3550 InputFile *f = input_files[i];
/* Non-seekable I/O (or, without pb, any real demuxer other than "lavfi")
 * is treated as live input: its thread sends non-blocking. */
3552 if (f->ctx->pb ? !f->ctx->pb->seekable :
3553 strcmp(f->ctx->iformat->name, "lavfi"))
3554 f->non_blocking = 1;
3555 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3556 f->thread_queue_size, sizeof(AVPacket));
3560 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns a positive errno value, hence AVERROR(ret). */
3561 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3562 av_thread_message_queue_free(&f->in_thread_queue);
3563 return AVERROR(ret);
/* Multi-threaded packet fetch: receive the next packet from the input file's
 * demuxer-thread queue; non-blocking for live (non-seekable) inputs. */
3569 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3571 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3573 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file, honoring -re style rate
 * limiting, and dispatching to the threaded path when multiple inputs exist. */
3577 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Rate-limit readout (presumably gated on f->rate_emu — the guard line is
 * omitted from this listing): if any stream's DTS is ahead of wall-clock
 * time elapsed since start, report EAGAIN so the caller waits. */
3581 for (i = 0; i < f->nb_streams; i++) {
3582 InputStream *ist = input_streams[f->ist_index + i];
/* Stream DTS converted to microseconds for comparison with av_gettime. */
3583 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3584 int64_t now = av_gettime_relative() - ist->start;
3586 return AVERROR(EAGAIN);
/* Several inputs: packets come from per-file demuxer threads. */
3591 if (nb_input_files > 1)
3592 return get_input_packet_mt(f, pkt);
/* Single input: read synchronously on the main thread. */
3594 return av_read_frame(f->ctx, pkt);
/* Return non-zero if any output stream is currently marked unavailable
 * (i.e. its source reported EAGAIN); the return statements fall on lines
 * omitted from this listing. */
3597 static int got_eagain(void)
3600 for (i = 0; i < nb_output_streams; i++)
3601 if (output_streams[i]->unavailable)
/* Clear all EAGAIN bookkeeping: every input file's eagain flag and every
 * output stream's unavailable flag, so the next scheduling pass starts fresh. */
3606 static void reset_eagain(void)
3609 for (i = 0; i < nb_input_files; i++)
3610 input_files[i]->eagain = 0;
3611 for (i = 0; i < nb_output_streams; i++)
3612 output_streams[i]->unavailable = 0;
3617 * - 0 -- one packet was read and processed
3618 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3619 * this function should be called again
3620 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (start-time correction, wrap correction, ts_offset, ts_scale,
 * discontinuity handling), then hand it to process_input_packet().
 * NOTE(review): this listing omits many original lines; comments below
 * describe only what the visible lines establish. */
3622 static int process_input(int file_index)
3624 InputFile *ifile = input_files[file_index];
3625 AVFormatContext *is;
3631 ret = get_input_packet(ifile, &pkt);
/* No packet available right now: mark the file and retry later (the
 * marking code is on omitted lines). */
3633 if (ret == AVERROR(EAGAIN)) {
3638 if (ret != AVERROR_EOF) {
3639 print_error(is->filename, ret);
/* EOF path: flush every decoder on this file with a NULL packet... */
3644 for (i = 0; i < ifile->nb_streams; i++) {
3645 ist = input_streams[ifile->ist_index + i];
3646 if (ist->decoding_needed) {
3647 ret = process_input_packet(ist, NULL);
3652 /* mark all outputs that don't go through lavfi as finished */
3653 for (j = 0; j < nb_output_streams; j++) {
3654 OutputStream *ost = output_streams[j];
3656 if (ost->source_index == ifile->ist_index + i &&
3657 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3658 finish_output_stream(ost);
/* ...then flag the file as done. EAGAIN (not EOF) is returned so the main
 * loop makes one more pass to drain filters/encoders. */
3662 ifile->eof_reached = 1;
3663 return AVERROR(EAGAIN);
/* Optional packet/hex dump when enabled via the 'h' interactive key. */
3669 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3670 is->streams[pkt.stream_index]);
3672 /* the following test is needed in case new streams appear
3673 dynamically in stream : we ignore them */
3674 if (pkt.stream_index >= ifile->nb_streams) {
3675 report_new_stream(file_index, &pkt);
3676 goto discard_packet;
3679 ist = input_streams[ifile->ist_index + pkt.stream_index];
/* Byte accounting for the final statistics report. */
3681 ist->data_size += pkt.size;
/* Discarded streams (presumably ist->discard — guard line omitted) are skipped. */
3685 goto discard_packet;
/* Verbose per-packet trace of raw demuxer timestamps before any fixup. */
3688 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3689 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3690 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3691 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3692 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3693 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3694 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3695 av_ts2str(input_files[ist->file_index]->ts_offset),
3696 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time timestamp wrap correction for streams with < 64 pts_wrap_bits. */
3699 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3700 int64_t stime, stime2;
3701 // Correcting starttime based on the enabled streams
3702 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3703 // so we instead do it here as part of discontinuity handling
3704 if ( ist->next_dts == AV_NOPTS_VALUE
3705 && ifile->ts_offset == -is->start_time
3706 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
/* Recompute the effective start time as the minimum start time over all
 * streams that are not fully discarded. */
3707 int64_t new_start_time = INT64_MAX;
3708 for (i=0; i<is->nb_streams; i++) {
3709 AVStream *st = is->streams[i];
3710 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3712 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3714 if (new_start_time > is->start_time) {
3715 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3716 ifile->ts_offset = -new_start_time;
/* stime2 is one full wrap period past stime; timestamps landing more than
 * half a period above stime are assumed to be pre-wrap and pulled back. */
3720 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3721 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3722 ist->wrap_correction_done = 1;
3724 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3725 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
/* A correction on this packet means later packets may still need one. */
3726 ist->wrap_correction_done = 0;
3728 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3729 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3730 ist->wrap_correction_done = 0;
3734 /* add the stream-global side data to the first packet */
3735 if (ist->nb_packets == 1) {
3736 if (ist->st->nb_side_data)
3737 av_packet_split_side_data(&pkt);
3738 for (i = 0; i < ist->st->nb_side_data; i++) {
3739 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries. */
3742 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrices are handled by autorotate instead of being forwarded. */
3744 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3747 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3751 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset (-itsoffset / start-time shift)... */
3755 if (pkt.dts != AV_NOPTS_VALUE)
3756 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3757 if (pkt.pts != AV_NOPTS_VALUE)
3758 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* ...then the per-stream timestamp scale (-itsscale). */
3760 if (pkt.pts != AV_NOPTS_VALUE)
3761 pkt.pts *= ist->ts_scale;
3762 if (pkt.dts != AV_NOPTS_VALUE)
3763 pkt.dts *= ist->ts_scale;
/* First A/V packet of a discontinuous format: align this file's offset with
 * the last timestamp seen, to splice inputs without a jump. */
3765 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3766 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3767 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3768 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3769 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3770 int64_t delta = pkt_dts - ifile->last_ts;
3771 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3772 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3773 ifile->ts_offset -= delta;
3774 av_log(NULL, AV_LOG_DEBUG,
3775 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3776 delta, ifile->ts_offset);
3777 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3778 if (pkt.pts != AV_NOPTS_VALUE)
3779 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Mid-stream discontinuity handling: compare this packet's DTS with the
 * predicted next DTS (guard against copy_ts presumably falls on the
 * omitted line 3786). */
3783 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3784 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3785 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3787 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3788 int64_t delta = pkt_dts - ist->next_dts;
3789 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* Discontinuous formats (e.g. MPEG-TS): absorb the jump into ts_offset. */
3790 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3791 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3792 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3793 ifile->ts_offset -= delta;
3794 av_log(NULL, AV_LOG_DEBUG,
3795 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3796 delta, ifile->ts_offset);
3797 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3798 if (pkt.pts != AV_NOPTS_VALUE)
3799 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-discontinuous formats: timestamps beyond the error threshold are
 * invalid and dropped (set to AV_NOPTS_VALUE) rather than corrected. */
3802 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3803 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3804 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3805 pkt.dts = AV_NOPTS_VALUE;
3807 if (pkt.pts != AV_NOPTS_VALUE){
3808 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3809 delta = pkt_pts - ist->next_dts;
3810 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3811 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3812 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3813 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last DTS for inter-file discontinuity detection above. */
3819 if (pkt.dts != AV_NOPTS_VALUE)
3820 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Verbose trace of timestamps after all fixups. */
3823 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3824 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3825 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3826 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3827 av_ts2str(input_files[ist->file_index]->ts_offset),
3828 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video subtitle overlays advancing even without new subtitles. */
3831 sub2video_heartbeat(ist, pkt.pts);
3833 process_input_packet(ist, &pkt);
3836 av_free_packet(&pkt);
3842 * Perform a step of transcoding for the specified filter graph.
3844 * @param[in] graph filter graph to consider
3845 * @param[out] best_ist input stream where a frame would allow to continue
3846 * @return 0 for success, <0 for error
3848 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3851 int nb_requests, nb_requests_max = 0;
3852 InputFilter *ifilter;
/* Ask the graph to produce output on its oldest-needing sink. */
3856 ret = avfilter_graph_request_oldest(graph->graph);
/* Success: drain whatever frames the sinks now have. */
3858 return reap_filters(0);
3860 if (ret == AVERROR_EOF) {
/* Graph is fully drained: flush sinks and close all its output streams. */
3861 ret = reap_filters(1);
3862 for (i = 0; i < graph->nb_outputs; i++)
3863 close_output_stream(graph->outputs[i]->ost);
/* Any error other than EAGAIN is propagated to the caller. */
3866 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs more input. Pick the input whose buffersrc has
 * failed the most requests — feeding it is most likely to unblock the graph.
 * Inputs whose file is stalled (eagain) or exhausted (eof) are skipped. */
3869 for (i = 0; i < graph->nb_inputs; i++) {
3870 ifilter = graph->inputs[i];
3872 if (input_files[ist->file_index]->eagain ||
3873 input_files[ist->file_index]->eof_reached)
3875 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3876 if (nb_requests > nb_requests_max) {
3877 nb_requests_max = nb_requests;
/* No feedable input found: mark every output of this graph unavailable so
 * the scheduler moves on (the guard line is omitted from this listing). */
3883 for (i = 0; i < graph->nb_outputs; i++)
3884 graph->outputs[i]->ost->unavailable = 1;
3890 * Run a single step of transcoding.
3892 * @return 0 for success, <0 for error
3894 static int transcode_step(void)
/* Pick the output stream that is furthest behind. */
3900 ost = choose_output();
/* No stream selected and nothing pending (conditions on omitted lines):
 * all inputs are done. */
3907 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered stream: let its graph decide which input to feed (ist is set
 * by transcode_from_filter; an early return for the no-input case is on
 * omitted lines). */
3912 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered stream: read directly from its source input stream. */
3917 av_assert0(ost->source_index >= 0);
3918 ist = input_streams[ost->source_index];
3921 ret = process_input(ist->file_index);
3922 if (ret == AVERROR(EAGAIN)) {
/* Input stalled: mark this output unavailable so the scheduler skips it. */
3923 if (input_files[ist->file_index]->eagain)
3924 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
3929 return ret == AVERROR_EOF ? 0 : ret;
/* Drain any frames the filtergraphs produced as a result of this step. */
3931 return reap_filters(0);
3935 * The following code is the main loop of the file converter
/* Top-level driver: initialize, loop over transcode_step() until all output
 * is produced or the user/a signal stops us, then flush, write trailers,
 * close codecs, and free per-stream resources.
 * NOTE(review): many original lines are omitted from this listing
 * (declarations, braces, some cleanup); comments reflect visible lines only. */
3937 static int transcode(void)
3940 AVFormatContext *os;
3943 int64_t timer_start;
3945 ret = transcode_init();
3949 if (stdin_interaction) {
3950 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3953 timer_start = av_gettime_relative();
/* Start per-input demuxer threads (no-op with a single input). */
3956 if ((ret = init_input_threads()) < 0)
3960 while (!received_sigterm) {
3961 int64_t cur_time= av_gettime_relative();
3963 /* if 'q' pressed, exits */
3964 if (stdin_interaction)
3965 if (check_keyboard_interaction(cur_time) < 0)
3968 /* check if there's any stream where output is still needed */
3969 if (!need_output()) {
3970 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3974 ret = transcode_step();
/* EOF/EAGAIN from a step are expected; real errors are reported and
 * terminate the loop (flow-control lines are omitted here). */
3976 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3980 av_strerror(ret, errbuf, sizeof(errbuf));
3982 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3987 /* dump report by using the output first video and audio streams */
3988 print_report(0, timer_start, cur_time);
3991 free_input_threads();
3994 /* at the end of stream, we must flush the decoder buffers */
3995 for (i = 0; i < nb_input_streams; i++) {
3996 ist = input_streams[i];
/* Streams whose file already reached EOF were flushed in process_input. */
3997 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3998 process_input_packet(ist, NULL);
4005 /* write the trailer if needed and close file */
4006 for (i = 0; i < nb_output_files; i++) {
4007 os = output_files[i]->ctx;
4008 av_write_trailer(os);
4011 /* dump report by using the first video and audio streams */
4012 print_report(1, timer_start, av_gettime_relative());
4014 /* close each encoder */
4015 for (i = 0; i < nb_output_streams; i++) {
4016 ost = output_streams[i];
4017 if (ost->encoding_needed) {
/* Free two-pass stats input before the encoder context goes away. */
4018 av_freep(&ost->enc_ctx->stats_in);
4022 /* close each decoder */
4023 for (i = 0; i < nb_input_streams; i++) {
4024 ist = input_streams[i];
4025 if (ist->decoding_needed) {
4026 avcodec_close(ist->dec_ctx);
4027 if (ist->hwaccel_uninit)
4028 ist->hwaccel_uninit(ist->dec_ctx);
/* Error-path cleanup (label presumably on an omitted line): threads may
 * still be running if we bailed out of the main loop. */
4037 free_input_threads();
4040 if (output_streams) {
4041 for (i = 0; i < nb_output_streams; i++) {
4042 ost = output_streams[i];
/* Close the two-pass log file if one was opened. */
4045 fclose(ost->logfile);
4046 ost->logfile = NULL;
4048 av_freep(&ost->forced_kf_pts);
4049 av_freep(&ost->apad);
4050 av_freep(&ost->disposition);
4051 av_dict_free(&ost->encoder_opts);
4052 av_dict_free(&ost->sws_dict);
4053 av_dict_free(&ost->swr_opts);
4054 av_dict_free(&ost->resample_opts);
4055 av_dict_free(&ost->bsf_args);
/* Return this process's consumed user CPU time in microseconds, using the
 * best available platform API (getrusage / GetProcessTimes), falling back
 * to wall-clock time when neither exists. Used for the -benchmark report. */
4063 static int64_t getutime(void)
4066 struct rusage rusage;
4068 getrusage(RUSAGE_SELF, &rusage);
4069 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4070 #elif HAVE_GETPROCESSTIMES
4072 FILETIME c, e, k, u;
4073 proc = GetCurrentProcess();
4074 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units split across two 32-bit words; /10 -> usec. */
4075 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4077 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (max RSS via getrusage,
 * or PeakPagefileUsage on Windows); the fallback return for platforms with
 * neither API falls on lines omitted from this listing. */
4081 static int64_t getmaxrss(void)
4083 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4084 struct rusage rusage;
4085 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; convert to bytes. */
4086 return (int64_t)rusage.ru_maxrss * 1024;
4087 #elif HAVE_GETPROCESSMEMORYINFO
4089 PROCESS_MEMORY_COUNTERS memcounters;
4090 proc = GetCurrentProcess();
4091 memcounters.cb = sizeof(memcounters);
4092 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4093 return memcounters.PeakPagefileUsage;
/* No-op log callback: installed to silence all libav* logging (used by the
 * undocumented "-d" daemon mode in main below). Body is empty. */
4099 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and cleanup, register all components,
 * parse the command line, run transcode(), and report benchmark/error stats.
 * NOTE(review): several original lines are omitted from this listing. */
4103 int main(int argc, char **argv)
/* Ensure ffmpeg_cleanup runs on every exit_program() path. */
4108 register_exit(ffmpeg_cleanup);
4110 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4112 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Loglevel must be parsed before any other option so early logs obey it. */
4113 parse_loglevel(argc, argv, options);
/* Hidden "-d" first argument: run as daemon with logging suppressed. */
4115 if(argc>1 && !strcmp(argv[1], "-d")){
4117 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters, formats, and network support. */
4122 avcodec_register_all();
4124 avdevice_register_all();
4126 avfilter_register_all();
4128 avformat_network_init();
4130 show_banner(argc, argv, options);
4134 /* parse options and open all input/output files */
4135 ret = ffmpeg_parse_options(argc, argv);
4139 if (nb_output_files <= 0 && nb_input_files == 0) {
4141 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4145 /* file converter / grab */
4146 if (nb_output_files <= 0) {
4147 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4151 // if (nb_input_files == 0) {
4152 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole conversion in user-CPU microseconds for -benchmark. */
4156 current_time = ti = getutime();
4157 if (transcode() < 0)
4159 ti = getutime() - ti;
4161 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4163 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4164 decode_error_stat[0], decode_error_stat[1]);
/* Fail the run if the decode-error ratio exceeds -max_error_rate. */
4165 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 when interrupted by a signal; otherwise the normal code. */
4168 exit_program(received_nb_signals ? 255 : main_return_code);
4169 return main_return_code;