2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* Program identity reported by cmdutils (banner, -version output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Opened lazily by do_video_stats() when -vstats/-vstats_file is given. */
static FILE *vstats_file;

/* Names of the constants usable in -force_key_frames expressions
   (initializer elements elided in this listing). */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;
static int nb_frames_dup = 0;       /* frames duplicated by vsync (do_video_out) */
static int nb_frames_drop = 0;      /* frames dropped by vsync */
static int64_t decode_error_stat[2]; /* decode success/error counters */
/* Reference timestamp for update_benchmark(): holds a getutime() value
 * (a microsecond-resolution int64_t; see the prototype above and the
 * "t - current_time" delta printed with PRIu64 in update_benchmark()).
 * Must be 64-bit — a plain int would truncate the stored time and make
 * the printed "bench:" deltas wrong. */
static int64_t current_time;
AVIOContext *progress_avio = NULL;   /* destination for -progress reports */

static uint8_t *subtitle_out;        /* scratch buffer for encoded subtitles */

/* Global tables of input/output files and their streams; shared with the
   option-parsing and filtering code, released in ffmpeg_cleanup(). */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;        /* saved tty state, restored on exit */
static int restore_tty;              /* nonzero once oldtty holds valid settings */

static void free_input_threads(void);
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * (Re)allocate ist->sub2video.frame as a blank RGB32 canvas, sized from the
 * subtitle decoder when it reports dimensions, else from the pre-computed
 * sub2video geometry. Zeroed data means fully transparent pixels.
 * NOTE(review): braces, the ret declaration and return statements are
 * elided in this listing.
 */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* prefer the decoder's geometry; fall back to the recorded size */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* all-zero RGB32 == transparent canvas */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one bitmap subtitle rectangle onto the RGB32 canvas, expanding each
 * PAL8 index through the rectangle's 32-bit palette.
 * (dst, dst_linesize) describe the canvas; (w, h) are its bounds used for
 * the overflow check.
 * NOTE(review): the AVSubtitleRect *r parameter, some locals and closing
 * braces are elided in this listing.
 */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    /* refuse rectangles that would write outside the canvas */
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
               r->x, r->y, r->w, r->h, w, h

    dst += r->y * dst_linesize + r->x * 4;   /* 4 bytes per RGB32 pixel */
    src = r->pict.data[0];
    pal = (uint32_t *)r->pict.data[1];       /* 256-entry ARGB palette */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];      /* palette lookup per pixel */
        src += r->pict.linesize[0];
/*
 * Stamp the current sub2video canvas with pts and push it into every filter
 * fed by this input stream. KEEP_REF retains our reference so the same
 * frame can be re-sent by the heartbeat mechanism.
 */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);   /* canvas must already be allocated */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/*
 * Render an AVSubtitle onto a fresh blank canvas and push it to the
 * filters; with sub == NULL, push an empty (clearing) frame instead.
 * Display times are converted from AV_TIME_BASE to the stream time base;
 * end_pts records when the rendered subtitle stops being displayed.
 * NOTE(review): the sub == NULL branch and several locals/braces are
 * elided in this listing.
 */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE) */
    pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                             AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    /* (NULL-subtitle path) reuse the previous end as the clearing pts */
    pts       = ist->sub2video.end_pts;

    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/*
 * Called whenever a packet is read from this input file: re-send the
 * current sub2video frame to sibling subtitle streams whose filters are
 * starved, so decoded video does not pile up behind an overlay filter
 * waiting for subtitle input.
 * NOTE(review): continue statements, the nb_reqs guard and braces are
 * elided in this listing.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* re-render when the current subtitle expired or none exists yet */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* count failed filter requests: only push when some filter is starved */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/*
 * End-of-stream handling for a sub2video input: send a final clearing
 * frame if a subtitle is still being displayed, then signal EOF (NULL
 * frame) to every filter fed by this stream.
 */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
300 /* end of sub2video hack */
/*
 * Async-signal-safe terminal restore: put the tty back into the state
 * saved by term_init(). Safe to call from a signal handler.
 */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);

/* NOTE(review): the av_log below most likely belongs to term_exit(),
   whose signature is elided in this listing — verify against the full
   source before editing. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. volatile because these are written from the
   signal handler (and, on Windows, the console-control handler thread)
   and read from the main loop. */
static volatile int received_sigterm = 0;    /* last signal number received */
static volatile int received_nb_signals = 0; /* how many signals so far */
static volatile int transcode_init_done = 0; /* set once setup has finished */
static volatile int ffmpeg_exited = 0;       /* set when cleanup completed */
static int main_return_code = 0;             /* process exit status */
/*
 * POSIX signal handler: record which signal arrived and how many times.
 * After more than three signals, give up on graceful shutdown and report
 * before hard-exiting (the exit call is elided in this listing).
 * NOTE(review): the return type line is elided here.
 */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;

    if(received_nb_signals > 3) {
        /* raw write(): only async-signal-safe functions may be used here */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/*
 * Win32 console control handler: translate console events (Ctrl-C, close,
 * logoff, shutdown) into the same sigterm_handler() path used on POSIX.
 * Runs on a system-created thread, concurrently with main().
 * NOTE(review): the switch statement, case CTRL_C_EVENT, returns and the
 * sleep inside the while loop are elided in this listing.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Body of term_init() — the signature and surrounding #ifdefs are elided
   in this listing. Puts the controlling tty into a raw-ish mode so single
   keypresses can be read by read_key(), and installs the shutdown signal
   handlers. */
    istty = isatty(0) && isatty(2);

    if (istty && tcgetattr (0, &tty) == 0) {
        /* disable canonical input, echo and software flow control */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);

        tcsetattr (0, TCSANOW, &tty);
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    signal(SIGXCPU, sigterm_handler); /* CPU-time limit exceeded */
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
static int read_key(void)
    /* POSIX path: poll stdin via select() with a zero timeout
       (fd_set/timeval setup elided in this listing) */
    n = select(1, &rfds, NULL, NULL, &tv);

# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;

    input_handle = GetStdHandle(STD_INPUT_HANDLE);
    is_pipe = !GetConsoleMode(input_handle, &dw);  /* no console mode => stdin is a pipe */

    /* When running under a GUI, you will end here. */
    if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
        // input pipe may have been closed by the program that ran ffmpeg
/*
 * AVIO interrupt callback: abort blocking I/O when the number of received
 * signals exceeds transcode_init_done (0 during init, 1 afterwards) —
 * i.e. a single Ctrl-C aborts blocking I/O immediately during setup,
 * while after init one signal first attempts a normal shutdown.
 */
static int decode_interrupt_cb(void *ctx)
    return received_nb_signals > transcode_init_done;

const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown, registered to run at exit: frees filtergraphs, output
 * files/streams, input files/streams and global tables, then reports the
 * final status. ret is the pending exit code.
 * NOTE(review): many guard conditions, loop braces and trailing statements
 * (including the ffmpeg_exited flag set) are elided in this listing.
 */
static void ffmpeg_cleanup(int ret)
    /* -benchmark: report peak resident set size in kB */
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* free all filtergraphs: the graph itself, then input/output pads */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close output files: close the pb (elided) unless AVFMT_NOFILE */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);

    /* free per-output-stream resources */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;

        /* walk and close the bitstream-filter chain */
        bsfc = ost->bitstream_filters;
        AVBitStreamFilterContext *next = bsfc->next;
        av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);

    /* input side: stop reader threads before touching their contexts */
    free_input_threads();

    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);

    av_freep(&vstats_filename);

    /* free the global stream/file tables themselves */
    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    /* final status message */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/*
 * Remove from *a every key that also appears in b (case-sensitive match).
 * Used to drop options that another component has already consumed.
 */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
    AVDictionaryEntry *t = NULL;

    /* "" with AV_DICT_IGNORE_SUFFIX iterates over every entry of b */
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Fail fatally (exit call elided in this listing) if any option in m was
 * left unconsumed, i.e. not recognized by the muxer/demuxer/codec.
 */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort because codec c is experimental; encoder selects the wording.
   (Body elided in this listing.) */
static void abort_codec_experimental(AVCodec *c, int encoder)
/*
 * With -benchmark_all: log the CPU time consumed since the previous call,
 * labelled by the printf-style fmt arguments. Calling with fmt == NULL
 * only resets the reference point (that branch and the va_list handling
 * are elided in this listing).
 */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();

        vsnprintf(buf, sizeof(buf), fmt, va);
        av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/*
 * Mark every output stream as finished: ost itself gets this_stream,
 * every other stream gets others. Used to stop all muxing after a fatal
 * write error.
 */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Final muxing step for one packet: copy extradata to the muxing context
 * if needed, enforce -frames limits, extract quality/error side data for
 * stats, run the bitstream-filter chain, sanitize DTS/PTS monotonicity,
 * and hand the packet to av_interleaved_write_frame().
 * NOTE(review): numerous guards, braces and intermediate statements are
 * elided in this listing; the visible lines show only the skeleton.
 */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    /* stream-copy uses the muxing context, encoding the encoder context */
    AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* propagate encoder extradata to the muxing codec context if missing */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;

    /* -vsync drop / negative -async: strip timestamps entirely */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {

    /* pull quality/PSNR stats out of the encoder's side data */
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            ost->error[i] = AV_RL64(sd + 8 + 8*i);

    av_packet_split_side_data(pkt);

    /* run the bitstream-filter chain (loop over bsfc elided) */
        AVPacket new_pkt = *pkt;
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if(a == 0 && new_pkt.data != pkt->data) {
            uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
            memcpy(t, new_pkt.data, new_pkt.size);
            memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        /* side data was consumed above; detach it from the packet */
        pkt->side_data = NULL;
        pkt->side_data_elems = 0;
        /* wrap the filtered data in a refcounted buffer */
        new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                       av_buffer_default_free, NULL, 0);
            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");

    /* timestamp sanitization for formats that require timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* replace dts by the median of {pts, dts, last_mux_dts+1} */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict formats require strictly increasing dts */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    /* muxing statistics for the progress report */
    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        /* write error: record failure and shut down every stream */
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/*
 * Mark the encoder side of ost as finished and clamp the containing
 * file's recording time so sibling streams stop at the same point
 * (the guard around the clamp is elided in this listing).
 */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        /* convert the stream position to AV_TIME_BASE for comparison */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/*
 * Return whether encoding should continue on this stream with respect to
 * the -t limit: when the stream position has reached the file's
 * recording_time, close the stream (return statements are elided in
 * this listing).
 */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/*
 * Encode one audio frame and mux the resulting packet(s): fix up the
 * frame pts from sync_opts when needed, encode, rescale timestamps to
 * the stream time base and pass to write_frame().
 * NOTE(review): the AVFrame *frame parameter, pkt/got_packet declarations,
 * several guards and braces are elided in this listing.
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    /* respect -t: stop once the recording time is reached */
    if (!check_recording_time(ost))

    /* without a pts (or with negative -async) track our own clock */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);   /* reset the benchmark reference */
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* encoder time base -> stream time base before muxing */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

        write_frame(s, &pkt, ost);
/*
 * Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet to draw, one to clear), hence the nb loop.
 * Timestamps are shifted for -ss and checked against -t.
 * NOTE(review): the ost/sub/pts parameters, the AVCodecContext *enc and
 * AVPacket locals, and several guards/braces are elided in this listing.
 */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

    /* lazily allocate the shared encode buffer (freed in ffmpeg_cleanup) */
    subtitle_out = av_malloc(subtitle_out_max_size);
        av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        /* start_display_time is required to be 0 */
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);

        /* restore rect count possibly cleared for the DVB "clear" pass */
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* draw packet at start, clear packet at end (90 kHz units) */
            pkt.pts += 90 * sub->start_display_time;
            pkt.pts += 90 * sub->end_display_time;

        write_frame(s, &pkt, ost);
/*
 * Video output core: given the next filtered picture, decide how many
 * frames to emit (duplicating or dropping according to the -vsync mode),
 * handle forced keyframes, encode (or pass through raw pictures) and mux.
 * sync_ipts is the target pts in encoder time base; next_picture == NULL
 * flushes using the last frame.
 * NOTE(review): many lines (braces, case labels, va/local declarations,
 * returns) are elided in this listing; comments below describe only what
 * the visible lines establish.
 */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* nominal frame duration in encoder time-base units, from the filter
       frame rate, the output frame rate, or the input packet duration */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

    /* flush path: predict dup count from the recent history */
    nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                      ost->last_nb0_frames[1],
                                      ost->last_nb0_frames[2]);

    /* delta0: gap between target and current position; delta adds duration */
    delta0 = sync_ipts - ost->sync_opts;
    delta  = delta0 + duration;

    /* by default, we output a single frame */

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
        format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
        /* single-stream input with no ts offset: CFR relative to source */
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;

        /* frame arrived early: absorb small negative gaps into duration */
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
        double cor = FFMIN(-delta0, duration);
        if (delta0 < -0.6) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);

    switch (format_video_sync) {
        /* VSCFR: skip initial dup run, then lock to the input clock */
        if (ost->frame_number == 0 && delta - duration >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
            ost->sync_opts = lrint(sync_ipts);
        /* CFR: duplicate or drop to keep a constant rate */
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
        /* VFR: emit at the input timestamp when it moved far enough */
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);

    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    /* clamp to -frames and record dup history */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_droped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_droped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

        /* raw-picture fast path: no encode, write the struct directly */
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   = sizeof(AVPicture);
            pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;   /* let the encoder choose */

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* forced keyframe: explicit pts list, expression, or "source" */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                /* non-delaying encoder may omit pts; use our clock */
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;

    if (vstats_filename && frame_size)
        do_video_stats(ost, frame_size);

    /* keep a reference to the last real frame for future duplication */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
1248 static double psnr(double d)
1250 return -10.0 * log(d) / log(10.0);
/*
 * Append one line per encoded video frame to the -vstats file: frame
 * number, quantizer, optional PSNR, sizes, time and bitrates.
 * NOTE(review): the vstats_file NULL check, enc assignment, several
 * locals and braces are elided in this listing.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* quality stored as lambda; convert back to qscale */
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark this stream fully finished (encoder and muxer); the loop finishes
 * every stream of the same output file (its guard condition is elided in
 * this listing).
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1305 * Get and encode new output from any of the filtergraphs, without causing
1308 * @return 0 for success, <0 for severe errors
1310 static int reap_filters(int flush)
1312     AVFrame *filtered_frame = NULL;
1315     /* Reap all buffers present in the buffer sinks */
1316     for (i = 0; i < nb_output_streams; i++) {
1317         OutputStream *ost = output_streams[i];
1318         OutputFile *of = output_files[ost->file_index];
1319         AVFilterContext *filter;
1320         AVCodecContext *enc = ost->enc_ctx;
1325         filter = ost->filter->filter;
/* Lazily allocate the reusable per-stream frame used to pull sink output. */
1327         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1328             return AVERROR(ENOMEM);
1330         filtered_frame = ost->filtered_frame;
1333             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered in the sink; do not pump the graph. */
1334             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1335                                                 AV_BUFFERSINK_FLAG_NO_REQUEST);
1337                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1338                     av_log(NULL, AV_LOG_WARNING,
1339                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1340                 } else if (flush && ret == AVERROR_EOF) {
/* On a flushing EOF, push a NULL frame so the video path can emit its last frame(s). */
1341                     if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1342                         do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1346             if (ost->finished) {
1347                 av_frame_unref(filtered_frame);
1350             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1351                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1352                 AVRational tb = enc->time_base;
/* Scale the timebase denominator up by a power of two so the rescaled pts
 * keeps fractional precision; float_pts is then divided back down. */
1353                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1355                 tb.den <<= extra_bits;
1357                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1358                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1359                 float_pts /= 1 << extra_bits;
1360                 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1361                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* The frame itself carries the integer pts in the encoder timebase. */
1363                 filtered_frame->pts =
1364                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1365                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1367             //if (ost->source_index >= 0)
1368             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1370             switch (filter->inputs[0]->type) {
1371             case AVMEDIA_TYPE_VIDEO:
/* Unless the user forced an aspect ratio, propagate the filter output's SAR. */
1372                 if (!ost->frame_aspect_ratio.num)
1373                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1376                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1377                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1379                            enc->time_base.num, enc->time_base.den);
1382                 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1384             case AVMEDIA_TYPE_AUDIO:
/* Refuse un-normalized channel-count changes unless the encoder supports
 * mid-stream parameter changes. */
1385                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1386                     enc->channels != av_frame_get_channels(filtered_frame)) {
1387                     av_log(NULL, AV_LOG_ERROR,
1388                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1391                 do_audio_out(of->ctx, ost, filtered_frame);
1394                 // TODO support subtitle filters
1398             av_frame_unref(filtered_frame);
/*
 * Print end-of-run statistics: per-type byte totals and muxing overhead at
 * AV_LOG_INFO, then detailed per-input/per-output stream packet and frame
 * counts at AV_LOG_VERBOSE.  Warns if nothing at all was encoded.
 *
 * @param total_size total bytes written to the (first) output, or <=0 if unknown
 */
1405 static void print_final_stats(int64_t total_size)
1407     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1408     uint64_t subtitle_size = 0;
1409     uint64_t data_size = 0;
1410     float percent = -1.0;
/* Accumulate bytes written per media type across all output streams. */
1414     for (i = 0; i < nb_output_streams; i++) {
1415         OutputStream *ost = output_streams[i];
1416         switch (ost->enc_ctx->codec_type) {
1417             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1418             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1419             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1420             default:                 other_size += ost->data_size; break;
1422         extra_size += ost->enc_ctx->extradata_size;
1423         data_size  += ost->data_size;
/* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 spelling; the values are expected to match, but this
 * should read AV_CODEC_FLAG_PASS2 for consistency -- confirm and clean up. */
1424         if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1425             != AV_CODEC_FLAG_PASS1)
/* Muxing overhead: container bytes beyond the raw elementary-stream data. */
1429     if (data_size && total_size>0 && total_size >= data_size)
1430         percent = 100.0 * (total_size - data_size) / data_size;
1432     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1433            video_size / 1024.0,
1434            audio_size / 1024.0,
1435            subtitle_size / 1024.0,
1436            other_size / 1024.0,
1437            extra_size / 1024.0);
1439         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1441         av_log(NULL, AV_LOG_INFO, "unknown");
1442     av_log(NULL, AV_LOG_INFO, "\n");
1444     /* print verbose per-stream stats */
1445     for (i = 0; i < nb_input_files; i++) {
1446         InputFile *f = input_files[i];
/* Shadows the parameter on purpose: per-file totals, not the global one. */
1447         uint64_t total_packets = 0, total_size = 0;
1449         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1450                i, f->ctx->filename);
1452         for (j = 0; j < f->nb_streams; j++) {
1453             InputStream *ist = input_streams[f->ist_index + j];
1454             enum AVMediaType type = ist->dec_ctx->codec_type;
1456             total_size    += ist->data_size;
1457             total_packets += ist->nb_packets;
1459             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1460                    i, j, media_type_string(type));
1461             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1462                    ist->nb_packets, ist->data_size);
1464             if (ist->decoding_needed) {
1465                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1466                        ist->frames_decoded);
1467                 if (type == AVMEDIA_TYPE_AUDIO)
1468                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1469                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1472             av_log(NULL, AV_LOG_VERBOSE, "\n");
1475         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1476                total_packets, total_size);
1479     for (i = 0; i < nb_output_files; i++) {
1480         OutputFile *of = output_files[i];
1481         uint64_t total_packets = 0, total_size = 0;
1483         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1484                i, of->ctx->filename);
1486         for (j = 0; j < of->ctx->nb_streams; j++) {
1487             OutputStream *ost = output_streams[of->ost_index + j];
1488             enum AVMediaType type = ost->enc_ctx->codec_type;
1490             total_size    += ost->data_size;
1491             total_packets += ost->packets_written;
1493             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1494                    i, j, media_type_string(type));
1495             if (ost->encoding_needed) {
1496                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1497                        ost->frames_encoded);
1498                 if (type == AVMEDIA_TYPE_AUDIO)
1499                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1500                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1503             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1504                    ost->packets_written, ost->data_size);
1506             av_log(NULL, AV_LOG_VERBOSE, "\n");
1509         av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1510                total_packets, total_size);
/* Nothing was written at all -- likely a user option error; point at the usual suspects. */
1512     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1513         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1515             av_log(NULL, AV_LOG_WARNING, "\n");
1517             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (and final) progress report: the one-line status printed
 * to stderr ("frame= ... fps= ... size= ... time= ... bitrate= ...") and,
 * when -progress is in use, a key=value block written to progress_avio.
 * Rate-limited to one update per 500ms unless this is the last report.
 */
1522 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1525     AVBPrint buf_script;
1527     AVFormatContext *oc;
1529     AVCodecContext *enc;
1530     int frame_number, vid, i;
1532     int64_t pts = INT64_MIN;
/* Static state: last report wall-clock time and a cumulative QP histogram. */
1533     static int64_t last_time = -1;
1534     static int qp_histogram[52];
1535     int hours, mins, secs, us;
1537     if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to at most one every 500ms. */
1540     if (!is_last_report) {
1541         if (last_time == -1) {
1542             last_time = cur_time;
1545         if ((cur_time - last_time) < 500000)
1547         last_time = cur_time;
1551     oc = output_files[0]->ctx;
1553     total_size = avio_size(oc->pb);
1554     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1555         total_size = avio_tell(oc->pb);
1559     av_bprint_init(&buf_script, 0, 1);
1560     for (i = 0; i < nb_output_streams; i++) {
1562         ost = output_streams[i];
1564         if (!ost->stream_copy)
1565             q = ost->quality / (float) FF_QP2LAMBDA;
/* Additional video streams after the first only contribute their q value. */
1567         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1568             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1569             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1570                        ost->file_index, ost->index, q);
/* First video stream drives frame count / fps / q in the status line. */
1572         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1573             float fps, t = (cur_time-timer_start) / 1000000.0;
1575             frame_number = ost->frame_number;
1576             fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks one decimal place only for single-digit rates. */
1577             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1578                      frame_number, fps < 9.95, fps, q);
1579             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1580             av_bprintf(&buf_script, "fps=%.1f\n", fps);
1581             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1582                        ost->file_index, ost->index, q);
1584                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1588                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* Compress the histogram into one hex digit per QP bucket (log2 of count). */
1590                 for (j = 0; j < 32; j++)
1591                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1594             if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1596                 double error, error_sum = 0;
1597                 double scale, scale_sum = 0;
1599                 char type[3] = { 'Y','U','V' };
1600                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1601                 for (j = 0; j < 3; j++) {
/* Final report: cumulative error over all frames; otherwise per-frame error. */
1602                     if (is_last_report) {
1603                         error = enc->error[j];
1604                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1606                         error = ost->error[j];
1607                         scale = enc->width * enc->height * 255.0 * 255.0;
1613                     p = psnr(error / scale);
1614                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script key. */
1615                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1616                                ost->file_index, ost->index, type[j] | 32, p);
1618                 p = psnr(error_sum / scale_sum);
1619                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1620                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1621                            ost->file_index, ost->index, p);
1625         /* compute min output value */
1626         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1627             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1628                                           ost->st->time_base, AV_TIME_BASE_Q));
1630             nb_frames_drop += ost->last_droped;
/* Split the pts (in AV_TIME_BASE units, i.e. microseconds) into h:m:s.us. */
1633     secs = FFABS(pts) / AV_TIME_BASE;
1634     us = FFABS(pts) % AV_TIME_BASE;
1640     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1642     if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1644     else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1645                                  "size=%8.0fkB time=", total_size / 1024.0);
1647         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1648     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1649              "%02d:%02d:%02d.%02d ", hours, mins, secs,
1650              (100 * us) / AV_TIME_BASE);
1653         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1654         av_bprintf(&buf_script, "bitrate=N/A\n");
1656         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1657         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1660     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1661     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
/* NOTE(review): the value written to out_time_ms is the pts in AV_TIME_BASE
 * (microsecond) units despite the "_ms" key name -- long-standing quirk,
 * confirm before relying on it. */
1662     av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1663     av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1664                hours, mins, secs, us);
1666     if (nb_frames_dup || nb_frames_drop)
1667         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1668                 nb_frames_dup, nb_frames_drop);
1669     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1670     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* '\r' keeps intermediate reports on one updating line; '\n' ends the final one. */
1672     if (print_stats || is_last_report) {
1673         const char end = is_last_report ? '\n' : '\r';
1674         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1675             fprintf(stderr, "%s    %c", buf, end);
1677             av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
1682     if (progress_avio) {
1683         av_bprintf(&buf_script, "progress=%s\n",
1684                    is_last_report ? "end" : "continue");
1685         avio_write(progress_avio, buf_script.str,
1686                    FFMIN(buf_script.len, buf_script.size - 1));
1687         avio_flush(progress_avio);
1688         av_bprint_finalize(&buf_script, NULL);
1689         if (is_last_report) {
1690             avio_closep(&progress_avio);
1695         print_final_stats(total_size);
/*
 * Drain every active encoder at end of stream: feed each one NULL frames
 * until it stops producing packets, writing each flushed packet to the
 * corresponding output file.  Streams that are not being encoded, raw-PCM
 * audio (frame_size <= 1), and rawvideo into RAWPICTURE muxers are skipped
 * because they have nothing buffered.
 */
1698 static void flush_encoders(void)
1702     for (i = 0; i < nb_output_streams; i++) {
1703         OutputStream   *ost = output_streams[i];
1704         AVCodecContext *enc = ost->enc_ctx;
1705         AVFormatContext *os = output_files[ost->file_index]->ctx;
1706         int stop_encoding = 0;
1708         if (!ost->encoding_needed)
1711         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1713         if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Select the media-appropriate encode entry point; subtitles are not flushed here. */
1717             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1720             switch (enc->codec_type) {
1721             case AVMEDIA_TYPE_AUDIO:
1722                 encode = avcodec_encode_audio2;
1725             case AVMEDIA_TYPE_VIDEO:
1726                 encode = avcodec_encode_video2;
1737                 av_init_packet(&pkt);
/* NULL frame == flush request; loop until got_packet goes to zero. */
1741                 update_benchmark(NULL);
1742                 ret = encode(enc, &pkt, NULL, &got_packet);
1743                 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1745                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Keep two-pass log files complete with the encoder's final stats output. */
1750                 if (ost->logfile && enc->stats_out) {
1751                     fprintf(ost->logfile, "%s", enc->stats_out);
1757                 if (ost->finished & MUXER_FINISHED) {
1758                     av_free_packet(&pkt);
1761                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1762                 pkt_size = pkt.size;
1763                 write_frame(os, &pkt, ost);
1764                 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1765                     do_video_stats(ost, pkt_size);
1776  * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when the packet may be written: the output stream must be
 * fed from this exact input stream and the packet must not fall before the
 * output file's requested start time. */
1778 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1780     OutputFile *of = output_files[ost->file_index];
1781     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
/* Not the source stream mapped to this output -> reject. */
1783     if (ost->source_index != ist_index)
/* Before -ss start time of the output file -> reject (drop early packets). */
1789     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet through to an output stream without re-encoding:
 * rescale its timestamps into the output stream timebase (offset by the
 * output start time), enforce start/recording-time windows, optionally run
 * the parser-based bitstream adjustment, and hand the packet to the muxer.
 */
1795 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1797     OutputFile *of = output_files[ost->file_index];
1798     InputFile   *f = input_files [ist->file_index];
1799     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1800     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1801     int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1805     av_init_packet(&opkt);
/* By default the copied stream must start on a keyframe. */
1807     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1808         !ost->copy_initial_nonkeyframes)
/* Drop packets that precede the requested output start time, unless
 * -copypriorss asked to keep them; use ist->pts when the packet has no pts. */
1811     if (pkt->pts == AV_NOPTS_VALUE) {
1812         if (!ost->frame_number && ist->pts < start_time &&
1813             !ost->copy_prior_start)
1816         if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1817             !ost->copy_prior_start)
/* Past the output file's -t recording window: finish this stream. */
1821     if (of->recording_time != INT64_MAX &&
1822         ist->pts >= of->recording_time + start_time) {
1823         close_output_stream(ost);
/* Same check against the input file's own recording window. */
1827     if (f->recording_time != INT64_MAX) {
1828         start_time = f->ctx->start_time;
1829         if (f->start_time != AV_NOPTS_VALUE)
1830             start_time += f->start_time;
1831         if (ist->pts >= f->recording_time + start_time) {
1832             close_output_stream(ost);
1837     /* force the input stream PTS */
1838     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1841     if (pkt->pts != AV_NOPTS_VALUE)
1842         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1844         opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: fall back to the input stream's running dts estimate. */
1846     if (pkt->dts == AV_NOPTS_VALUE)
1847         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1849         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1850     opkt.dts -= ost_tb_start_time;
/* Audio: rescale via sample counts to avoid drift from coarse timebases. */
1852     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1853         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1855             duration = ist->dec_ctx->frame_size;
1856         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1857                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1858                                                ost->st->time_base) - ost_tb_start_time;
1861     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1862     opkt.flags    = pkt->flags;
1863     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
/* For codecs where the parser may rewrite packet data, let it produce a new
 * buffer; the listed codecs are exempt from this legacy path. */
1864     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1865        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1866        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1867        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1869         int ret = av_parser_change(ost->parser, ost->st->codec,
1870                                    &opkt.data, &opkt.size,
1871                                    pkt->data, pkt->size,
1872                                    pkt->flags & AV_PKT_FLAG_KEY);
1874             av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-allocated data in a refcounted buffer so the muxer owns it. */
1879             opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1884         opkt.data = pkt->data;
1885         opkt.size = pkt->size;
1887     av_copy_packet_side_data(&opkt, pkt);
/* RAWPICTURE muxers expect an AVPicture struct in the packet, not raw bytes. */
1889     if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1890         ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1891         (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1892         /* store AVPicture in AVPacket, as expected by the output format */
1893         int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1895             av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1899         opkt.data = (uint8_t *)&pict;
1900         opkt.size = sizeof(AVPicture);
1901         opkt.flags |= AV_PKT_FLAG_KEY;
1904     write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder did not report a channel layout, pick the default layout
 * for its channel count (bounded by -guess_layout_max) and warn the user.
 * Returns nonzero on success (a layout is now set or was already set).
 */
1907 int guess_input_channel_layout(InputStream *ist)
1909     AVCodecContext *dec = ist->dec_ctx;
1911     if (!dec->channel_layout) {
1912         char layout_name[256];
/* Too many channels to guess reliably -> refuse rather than mislabel. */
1914         if (dec->channels > ist->guess_layout_max)
1916         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1917         if (!dec->channel_layout)
1919         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1920                                      dec->channels, dec->channel_layout);
1921         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1922                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet, reconcile its timestamp from the various possible
 * sources (decoder pts, packet pts, running dts estimate), reconfigure the
 * filtergraphs if the sample format/rate/channel layout changed, and push
 * the decoded frame into every filter input fed by this stream.
 *
 * @param got_output set nonzero when a frame was produced
 * @return decoder/filter error code, or the decode result on success
 */
1927 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1929     AVFrame *decoded_frame, *f;
1930     AVCodecContext *avctx = ist->dec_ctx;
1931     int i, ret, err = 0, resample_changed;
1932     AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames for this stream. */
1934     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1935         return AVERROR(ENOMEM);
1936     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1937         return AVERROR(ENOMEM);
1938     decoded_frame = ist->decoded_frame;
1940     update_benchmark(NULL);
1941     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1942     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1944     if (ret >= 0 && avctx->sample_rate <= 0) {
1945         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1946         ret = AVERROR_INVALIDDATA;
/* Track decode success/failure counts for the error-rate check. */
1949     if (*got_output || ret<0)
1950         decode_error_stat[ret<0] ++;
1952     if (ret < 0 && exit_on_error)
1955     if (!*got_output || ret < 0)
1958     ist->samples_decoded += decoded_frame->nb_samples;
1959     ist->frames_decoded++;
1962     /* increment next_dts to use for the case where the input stream does not
1963        have timestamps or there are multiple frames in the packet */
1964     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1966     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in audio parameters that requires filtergraph reinit. */
1970     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1971                        ist->resample_channels       != avctx->channels               ||
1972                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1973                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1974     if (resample_changed) {
1975         char layout1[64], layout2[64];
1977             if (!guess_input_channel_layout(ist)) {
1978                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1979                        "layout for Input Stream #%d.%d\n", ist->file_index,
1983             decoded_frame->channel_layout = avctx->channel_layout;
1985         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1986                                      ist->resample_channel_layout);
1987         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1988                                      decoded_frame->channel_layout);
1990         av_log(NULL, AV_LOG_INFO,
1991                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1992                ist->file_index, ist->st->index,
1993                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
1994                ist->resample_channels, layout1,
1995                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1996                avctx->channels, layout2);
/* Remember the new parameters and rebuild every graph this stream feeds. */
1998         ist->resample_sample_fmt     = decoded_frame->format;
1999         ist->resample_sample_rate    = decoded_frame->sample_rate;
2000         ist->resample_channel_layout = decoded_frame->channel_layout;
2001         ist->resample_channels       = avctx->channels;
2003         for (i = 0; i < nb_filtergraphs; i++)
2004             if (ist_in_filtergraph(filtergraphs[i], ist)) {
2005                 FilterGraph *fg = filtergraphs[i];
2006                 if (configure_filtergraph(fg) < 0) {
2007                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2013     /* if the decoder provides a pts, use it instead of the last packet pts.
2014        the decoder could be delaying output by a packet or more. */
2015     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2016         ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2017         decoded_frame_tb   = avctx->time_base;
2018     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2019         decoded_frame->pts = decoded_frame->pkt_pts;
2020         decoded_frame_tb   = ist->st->time_base;
2021     } else if (pkt->pts != AV_NOPTS_VALUE) {
2022         decoded_frame->pts = pkt->pts;
2023         decoded_frame_tb   = ist->st->time_base;
2025         decoded_frame->pts = ist->dts;
2026         decoded_frame_tb   = AV_TIME_BASE_Q;
/* Consume the packet pts so a multi-frame packet is not double-stamped. */
2028     pkt->pts           = AV_NOPTS_VALUE;
2029     if (decoded_frame->pts != AV_NOPTS_VALUE)
2030         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2031                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2032                                               (AVRational){1, avctx->sample_rate});
/* Fan out to all filter inputs: ref-copy for all but the last consumer. */
2033     for (i = 0; i < ist->nb_filters; i++) {
2034         if (i < ist->nb_filters - 1) {
2035             f = ist->filter_frame;
2036             err = av_frame_ref(f, decoded_frame);
2041         err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2042                                            AV_BUFFERSRC_FLAG_PUSH);
2043         if (err == AVERROR_EOF)
2044             err = 0; /* ignore */
2048     decoded_frame->pts = AV_NOPTS_VALUE;
2050     av_frame_unref(ist->filter_frame);
2051     av_frame_unref(decoded_frame);
2052     return err < 0 ? err : ret;
/*
 * Decode one video packet: run the decoder, derive the best-effort timestamp,
 * optionally download hwaccel frames to system memory, reconfigure the
 * filtergraphs on size/pixel-format changes, and push the decoded frame into
 * every filter input fed by this stream.
 *
 * @param got_output set nonzero when a frame was produced
 * @return decoder/filter error code, or the decode result on success
 */
2055 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2057     AVFrame *decoded_frame, *f;
2058     int i, ret = 0, err = 0, resample_changed;
2059     int64_t best_effort_timestamp;
2060     AVRational *frame_sample_aspect;
2062     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2063         return AVERROR(ENOMEM);
2064     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2065         return AVERROR(ENOMEM);
2066     decoded_frame = ist->decoded_frame;
/* Feed the running dts estimate to the decoder via the packet dts. */
2067     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2069     update_benchmark(NULL);
2070     ret = avcodec_decode_video2(ist->dec_ctx,
2071                                 decoded_frame, got_output, pkt);
2072     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2074     // The following line may be required in some cases where there is no parser
2075     // or the parser does not has_b_frames correctly
2076     if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2077         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2078             ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2080             av_log(ist->dec_ctx, AV_LOG_WARNING,
2081                    "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2082                    "If you want to help, upload a sample "
2083                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2084                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2085                    ist->dec_ctx->has_b_frames,
2086                    ist->st->codec->has_b_frames);
2089     if (*got_output || ret<0)
2090         decode_error_stat[ret<0] ++;
2092     if (ret < 0 && exit_on_error)
/* Sanity check: decoded frame dimensions/format must match the context. */
2095     if (*got_output && ret >= 0) {
2096         if (ist->dec_ctx->width  != decoded_frame->width ||
2097             ist->dec_ctx->height != decoded_frame->height ||
2098             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2099             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2100                    decoded_frame->width,
2101                    decoded_frame->height,
2102                    decoded_frame->format,
2103                    ist->dec_ctx->width,
2104                    ist->dec_ctx->height,
2105                    ist->dec_ctx->pix_fmt);
2109     if (!*got_output || ret < 0)
2112     if(ist->top_field_first>=0)
2113         decoded_frame->top_field_first = ist->top_field_first;
2115     ist->frames_decoded++;
/* Hardware-decoded frames may need to be copied back to system memory. */
2117     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2118         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2122     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2124     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2125     if(best_effort_timestamp != AV_NOPTS_VALUE)
2126         ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2129         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2130                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2131                ist->st->index, av_ts2str(decoded_frame->pts),
2132                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2133                best_effort_timestamp,
2134                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2135                decoded_frame->key_frame, decoded_frame->pict_type,
2136                ist->st->time_base.num, ist->st->time_base.den);
/* Container-level aspect ratio overrides whatever the decoder produced. */
2141     if (ist->st->sample_aspect_ratio.num)
2142         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect size/pix_fmt changes that require rebuilding the filtergraphs. */
2144     resample_changed = ist->resample_width   != decoded_frame->width  ||
2145                        ist->resample_height  != decoded_frame->height ||
2146                        ist->resample_pix_fmt != decoded_frame->format;
2147     if (resample_changed) {
2148         av_log(NULL, AV_LOG_INFO,
2149                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2150                ist->file_index, ist->st->index,
2151                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2152                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2154         ist->resample_width   = decoded_frame->width;
2155         ist->resample_height  = decoded_frame->height;
2156         ist->resample_pix_fmt = decoded_frame->format;
2158         for (i = 0; i < nb_filtergraphs; i++) {
2159             if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2160                 configure_filtergraph(filtergraphs[i]) < 0) {
2161                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2167     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Fan out to all filter inputs: ref-copy for all but the last consumer. */
2168     for (i = 0; i < ist->nb_filters; i++) {
2169         if (!frame_sample_aspect->num)
2170             *frame_sample_aspect = ist->st->sample_aspect_ratio;
2172         if (i < ist->nb_filters - 1) {
2173             f = ist->filter_frame;
2174             err = av_frame_ref(f, decoded_frame);
2179         ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2180         if (ret == AVERROR_EOF) {
2181             ret = 0; /* ignore */
2182         } else if (ret < 0) {
2183             av_log(NULL, AV_LOG_FATAL,
2184                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2190     av_frame_unref(ist->filter_frame);
2191     av_frame_unref(decoded_frame);
2192     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and forward it to every subtitle-encoding
 * output stream fed by this input.  With -fix_sub_duration, subtitles are
 * delayed by one event so each event's display time can be clipped to the
 * start of the next one.
 *
 * @param got_output set nonzero when a subtitle was produced
 */
2195 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2197     AVSubtitle subtitle;
2198     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2199                                           &subtitle, got_output, pkt);
2201     if (*got_output || ret<0)
2202         decode_error_stat[ret<0] ++;
2204     if (ret < 0 && exit_on_error)
2207     if (ret < 0 || !*got_output) {
/* End of subtitle stream: flush any pending sub2video state. */
2209             sub2video_flush(ist);
2213     if (ist->fix_sub_duration) {
/* One-event delay: clip the previous subtitle's end_display_time to the
 * gap before this one, then swap current and previous. */
2215         if (ist->prev_sub.got_output) {
2216             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2217                              1000, AV_TIME_BASE);
2218             if (end < ist->prev_sub.subtitle.end_display_time) {
2219                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2220                        "Subtitle duration reduced from %d to %d%s\n",
2221                        ist->prev_sub.subtitle.end_display_time, end,
2222                        end <= 0 ? ", dropping it" : "");
2223                 ist->prev_sub.subtitle.end_display_time = end;
2226         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2227         FFSWAP(int,        ret,         ist->prev_sub.ret);
2228         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2236         sub2video_update(ist, &subtitle);
2238     if (!subtitle.num_rects)
2241     ist->frames_decoded++;
/* Deliver to every eligible subtitle encoder. */
2243     for (i = 0; i < nb_output_streams; i++) {
2244         OutputStream *ost = output_streams[i];
2246         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2247             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2250         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2254     avsubtitle_free(&subtitle);
/*
 * Signal end-of-stream to every filtergraph input fed by this input stream
 * by pushing a NULL frame into each buffer source.
 */
2258 static int send_filter_eof(InputStream *ist)
2261     for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for the buffersrc. */
2262         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2269 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Main per-packet dispatch for one input stream: maintain the stream's
 * running pts/dts estimates, decode (possibly multiple frames per packet)
 * when the stream is being transcoded, send filter EOF after the final
 * flush, and fall through to do_streamcopy() for stream-copied outputs.
 */
2270 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
/* First packet: seed dts with the decoder's reorder delay (if fps is known). */
2276     if (!ist->saw_first_ts) {
2277         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2279         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2280             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2281             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2283         ist->saw_first_ts = 1;
2286     if (ist->next_dts == AV_NOPTS_VALUE)
2287         ist->next_dts = ist->dts;
2288     if (ist->next_pts == AV_NOPTS_VALUE)
2289         ist->next_pts = ist->pts;
2293         av_init_packet(&avpkt);
2301     if (pkt->dts != AV_NOPTS_VALUE) {
2302         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2303         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2304             ist->next_pts = ist->pts = ist->dts;
2307     // while we have more to decode or while the decoder did output something on EOF
2308     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2312         ist->pts = ist->next_pts;
2313         ist->dts = ist->next_dts;
/* Decoder consumed only part of the packet -> multiple frames per packet. */
2315         if (avpkt.size && avpkt.size != pkt->size &&
2316             !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2317             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2318                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2319             ist->showed_multi_packet_warning = 1;
2322         switch (ist->dec_ctx->codec_type) {
2323         case AVMEDIA_TYPE_AUDIO:
2324             ret = decode_audio    (ist, &avpkt, &got_output);
2326         case AVMEDIA_TYPE_VIDEO:
2327             ret = decode_video    (ist, &avpkt, &got_output);
/* Advance next_dts by the frame duration: packet duration if present,
 * otherwise derived from the declared framerate and repeat_pict ticks. */
2328             if (avpkt.duration) {
2329                 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2330             } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2331                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2332                 duration = ((int64_t)AV_TIME_BASE *
2333                                 ist->dec_ctx->framerate.den * ticks) /
2334                                 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2338             if(ist->dts != AV_NOPTS_VALUE && duration) {
2339                 ist->next_dts += duration;
2341                 ist->next_dts = AV_NOPTS_VALUE;
2344                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2346         case AVMEDIA_TYPE_SUBTITLE:
2347             ret = transcode_subtitles(ist, &avpkt, &got_output);
2354             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2355                    ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first frame of a packet gets the packet's pts. */
2362         avpkt.pts= AV_NOPTS_VALUE;
2364         // touch data and size only if not EOF
2366             if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2374         if (got_output && !pkt)
2378     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2379     if (!pkt && ist->decoding_needed && !got_output) {
2380         int ret = send_filter_eof(ist);
2382             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2387     /* handle stream copy */
/* Stream copy path: advance the dts estimate ourselves since no decoder runs. */
2388     if (!ist->decoding_needed) {
2389         ist->dts = ist->next_dts;
2390         switch (ist->dec_ctx->codec_type) {
2391         case AVMEDIA_TYPE_AUDIO:
2392             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2393                              ist->dec_ctx->sample_rate;
2395         case AVMEDIA_TYPE_VIDEO:
2396             if (ist->framerate.num) {
2397                 // TODO: Remove work-around for c99-to-c89 issue 7
2398                 AVRational time_base_q = AV_TIME_BASE_Q;
2399                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2400                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2401             } else if (pkt->duration) {
2402                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2403             } else if(ist->dec_ctx->framerate.num != 0) {
2404                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2405                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2406                                   ist->dec_ctx->framerate.den * ticks) /
2407                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2411         ist->pts = ist->dts;
2412         ist->next_pts = ist->next_dts;
/* Hand the packet to every output stream that copies from this input. */
2414     for (i = 0; pkt && i < nb_output_streams; i++) {
2415         OutputStream *ost = output_streams[i];
2417         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2420         do_streamcopy(ist, ost, pkt);
2426 static void print_sdp(void)
2431 AVIOContext *sdp_pb;
2432 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2436 for (i = 0, j = 0; i < nb_output_files; i++) {
2437 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2438 avc[j] = output_files[i]->ctx;
2443 av_sdp_create(avc, j, sdp, sizeof(sdp));
2445 if (!sdp_filename) {
2446 printf("SDP:\n%s\n", sdp);
2449 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2450 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2452 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2453 avio_closep(&sdp_pb);
2454 av_freep(&sdp_filename);
2461 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2464 for (i = 0; hwaccels[i].name; i++)
2465 if (hwaccels[i].pix_fmt == pix_fmt)
2466 return &hwaccels[i];
2470 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2472 InputStream *ist = s->opaque;
2473 const enum AVPixelFormat *p;
2476 for (p = pix_fmts; *p != -1; p++) {
2477 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2478 const HWAccel *hwaccel;
2480 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2483 hwaccel = get_hwaccel(*p);
2485 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2486 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2489 ret = hwaccel->init(s);
2491 if (ist->hwaccel_id == hwaccel->id) {
2492 av_log(NULL, AV_LOG_FATAL,
2493 "%s hwaccel requested for input stream #%d:%d, "
2494 "but cannot be initialized.\n", hwaccel->name,
2495 ist->file_index, ist->st->index);
2496 return AV_PIX_FMT_NONE;
2500 ist->active_hwaccel_id = hwaccel->id;
2501 ist->hwaccel_pix_fmt = *p;
2508 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2510 InputStream *ist = s->opaque;
2512 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2513 return ist->hwaccel_get_buffer(s, frame, flags);
2515 return avcodec_default_get_buffer2(s, frame, flags);
2518 static int init_input_stream(int ist_index, char *error, int error_len)
2521 InputStream *ist = input_streams[ist_index];
2523 if (ist->decoding_needed) {
2524 AVCodec *codec = ist->dec;
2526 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2527 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2528 return AVERROR(EINVAL);
2531 ist->dec_ctx->opaque = ist;
2532 ist->dec_ctx->get_format = get_format;
2533 ist->dec_ctx->get_buffer2 = get_buffer;
2534 ist->dec_ctx->thread_safe_callbacks = 1;
2536 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2537 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2538 (ist->decoding_needed & DECODING_FOR_OST)) {
2539 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2540 if (ist->decoding_needed & DECODING_FOR_FILTER)
2541 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2544 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2545 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2546 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2547 if (ret == AVERROR_EXPERIMENTAL)
2548 abort_codec_experimental(codec, 0);
2550 snprintf(error, error_len,
2551 "Error while opening decoder for input stream "
2553 ist->file_index, ist->st->index, av_err2str(ret));
2556 assert_avoptions(ist->decoder_opts);
2559 ist->next_pts = AV_NOPTS_VALUE;
2560 ist->next_dts = AV_NOPTS_VALUE;
2565 static InputStream *get_input_stream(OutputStream *ost)
2567 if (ost->source_index >= 0)
2568 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values, ascending order.
 * Uses explicit comparison instead of subtraction to avoid signed
 * overflow for widely separated values.
 */
static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
    return va < vb ? -1 : va > vb ? +1 : 0;
}
/*
 * Open the encoder for one output stream and propagate the resulting
 * parameters (subtitle header, frame size, time base) to the muxer-level
 * AVStream.
 *
 * NOTE(review): this extract is missing interleaved lines (the stray
 * numeric prefixes come from the paste); code below is kept byte-identical.
 * The two time-base copies at the end presumably belong to the
 * encoding-needed and stream-copy branches respectively — TODO confirm
 * against the complete file.
 */
2578 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2582 if (ost->encoding_needed) {
2583 AVCodec *codec = ost->enc;
2584 AVCodecContext *dec = NULL;
2587 if ((ist = get_input_stream(ost)))
/* carry the decoder's subtitle header over so e.g. ASS rendering keeps styles */
2589 if (dec && dec->subtitle_header) {
2590 /* ASS code assumes this buffer is null terminated so add extra byte. */
2591 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2592 if (!ost->enc_ctx->subtitle_header)
2593 return AVERROR(ENOMEM);
2594 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2595 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* default the thread count unless the user chose one explicitly */
2597 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2598 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2599 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
/* default audio bitrate of 128k when the user set neither "b" nor "ab" */
2600 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2602 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2603 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2604 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2606 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2607 if (ret == AVERROR_EXPERIMENTAL)
2608 abort_codec_experimental(codec, 1);
2609 snprintf(error, error_len,
2610 "Error while opening encoder for output stream #%d:%d - "
2611 "maybe incorrect parameters such as bit_rate, rate, width or height",
2612 ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the buffersink to emit exact frames */
2615 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2616 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2617 av_buffersink_set_frame_size(ost->filter->filter,
2618 ost->enc_ctx->frame_size);
2619 assert_avoptions(ost->encoder_opts);
/* a bitrate under 1000 almost certainly means the user meant kbits/s */
2620 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2621 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2622 " It takes bits/s as argument, not kbits/s\n");
2624 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2626 av_log(NULL, AV_LOG_FATAL,
2627 "Error initializing the output stream codec context.\n");
2631 // copy timebase while removing common factors
2632 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2633 ost->st->codec->codec= ost->enc_ctx->codec;
/* stream-copy path: apply user codec options, then mirror the time base */
2635 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2637 av_log(NULL, AV_LOG_FATAL,
2638 "Error setting up codec context options.\n");
2641 // copy timebase while removing common factors
2642 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2648 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2649 AVCodecContext *avctx)
2652 int n = 1, i, size, index = 0;
2655 for (p = kf; *p; p++)
2659 pts = av_malloc_array(size, sizeof(*pts));
2661 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2666 for (i = 0; i < n; i++) {
2667 char *next = strchr(p, ',');
2672 if (!memcmp(p, "chapters", 8)) {
2674 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2677 if (avf->nb_chapters > INT_MAX - size ||
2678 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2680 av_log(NULL, AV_LOG_FATAL,
2681 "Could not allocate forced key frames array.\n");
2684 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2685 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2687 for (j = 0; j < avf->nb_chapters; j++) {
2688 AVChapter *c = avf->chapters[j];
2689 av_assert1(index < size);
2690 pts[index++] = av_rescale_q(c->start, c->time_base,
2691 avctx->time_base) + t;
2696 t = parse_time_or_die("force_key_frames", p, 1);
2697 av_assert1(index < size);
2698 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2705 av_assert0(index == size);
2706 qsort(pts, size, sizeof(*pts), compare_int64);
2707 ost->forced_kf_count = size;
2708 ost->forced_kf_pts = pts;
2711 static void report_new_stream(int input_index, AVPacket *pkt)
2713 InputFile *file = input_files[input_index];
2714 AVStream *st = file->ctx->streams[pkt->stream_index];
2716 if (pkt->stream_index < file->nb_streams_warn)
2718 av_log(file->ctx, AV_LOG_WARNING,
2719 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2720 av_get_media_type_string(st->codec->codec_type),
2721 input_index, pkt->stream_index,
2722 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2723 file->nb_streams_warn = pkt->stream_index + 1;
2726 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2728 AVDictionaryEntry *e;
2730 uint8_t *encoder_string;
2731 int encoder_string_len;
2732 int format_flags = 0;
2733 int codec_flags = 0;
2735 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2738 e = av_dict_get(of->opts, "fflags", NULL, 0);
2740 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2743 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2745 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2747 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2750 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2753 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2754 encoder_string = av_mallocz(encoder_string_len);
2755 if (!encoder_string)
2758 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2759 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2761 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2762 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2763 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2764 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time transcoding setup: bind complex-filtergraph outputs to input
 * streams, compute per-output-stream encoding/stream-copy parameters,
 * open encoders and decoders, write output file headers, and print the
 * stream mapping.
 *
 * NOTE(review): this extract is missing interleaved lines (braces,
 * else-branches, gotos — the stray numeric prefixes come from the paste);
 * code below is kept byte-identical to the extract.
 */
2767 static int transcode_init(void)
2769 int ret = 0, i, j, k;
2770 AVFormatContext *oc;
2773 char error[1024] = {0};
/* bind outputs of complex filtergraphs to the input stream feeding them */
2776 for (i = 0; i < nb_filtergraphs; i++) {
2777 FilterGraph *fg = filtergraphs[i];
2778 for (j = 0; j < fg->nb_outputs; j++) {
2779 OutputFilter *ofilter = fg->outputs[j];
2780 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2782 if (fg->nb_inputs != 1)
2784 for (k = nb_input_streams-1; k >= 0 ; k--)
2785 if (fg->inputs[0]->ist == input_streams[k])
2787 ofilter->ost->source_index = k;
2791 /* init framerate emulation */
2792 for (i = 0; i < nb_input_files; i++) {
2793 InputFile *ifile = input_files[i];
2794 if (ifile->rate_emu)
2795 for (j = 0; j < ifile->nb_streams; j++)
2796 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2799 /* for each output stream, we compute the right encoding parameters */
2800 for (i = 0; i < nb_output_streams; i++) {
2801 AVCodecContext *enc_ctx;
2802 AVCodecContext *dec_ctx = NULL;
2803 ost = output_streams[i];
2804 oc = output_files[ost->file_index]->ctx;
2805 ist = get_input_stream(ost);
2807 if (ost->attachment_filename)
2810 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2813 dec_ctx = ist->dec_ctx;
2815 ost->st->disposition = ist->st->disposition;
2816 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2817 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* no mapped input: mark the sole stream of its type as "default" */
2819 for (j=0; j<oc->nb_streams; j++) {
2820 AVStream *st = oc->streams[j];
2821 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2824 if (j == oc->nb_streams)
2825 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2826 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2829 if (ost->stream_copy) {
2831 uint64_t extra_size;
2833 av_assert0(ist && !ost->filter);
2835 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2837 if (extra_size > INT_MAX) {
2838 return AVERROR(EINVAL);
2841 /* if stream_copy is selected, no need to decode or encode */
2842 enc_ctx->codec_id = dec_ctx->codec_id;
2843 enc_ctx->codec_type = dec_ctx->codec_type;
2845 if (!enc_ctx->codec_tag) {
2846 unsigned int codec_tag;
2847 if (!oc->oformat->codec_tag ||
2848 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2849 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2850 enc_ctx->codec_tag = dec_ctx->codec_tag;
2853 enc_ctx->bit_rate = dec_ctx->bit_rate;
2854 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2855 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2856 enc_ctx->field_order = dec_ctx->field_order;
2857 if (dec_ctx->extradata_size) {
2858 enc_ctx->extradata = av_mallocz(extra_size);
2859 if (!enc_ctx->extradata) {
2860 return AVERROR(ENOMEM);
2862 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2864 enc_ctx->extradata_size= dec_ctx->extradata_size;
2865 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2867 enc_ctx->time_base = ist->st->time_base;
2869 * Avi is a special case here because it supports variable fps but
2870 * having the fps and timebase differe significantly adds quite some
2873 if(!strcmp(oc->oformat->name, "avi")) {
2874 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2875 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2876 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2877 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2879 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2880 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2881 enc_ctx->ticks_per_frame = 2;
2882 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2883 && av_q2d(ist->st->time_base) < 1.0/500
2885 enc_ctx->time_base = dec_ctx->time_base;
2886 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2887 enc_ctx->time_base.den *= 2;
2888 enc_ctx->ticks_per_frame = 2;
2890 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2891 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2892 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2893 && strcmp(oc->oformat->name, "f4v")
2895 if( copy_tb<0 && dec_ctx->time_base.den
2896 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2897 && av_q2d(ist->st->time_base) < 1.0/500
2899 enc_ctx->time_base = dec_ctx->time_base;
2900 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode ("tmcd") tracks keep the decoder time base when sane (<=120 fps) */
2903 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2904 && dec_ctx->time_base.num < dec_ctx->time_base.den
2905 && dec_ctx->time_base.num > 0
2906 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2907 enc_ctx->time_base = dec_ctx->time_base;
2910 if (!ost->frame_rate.num)
2911 ost->frame_rate = ist->framerate;
2912 if(ost->frame_rate.num)
2913 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2915 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2916 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* copy stream-level side data, minus an overridden display matrix */
2918 if (ist->st->nb_side_data) {
2919 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2920 sizeof(*ist->st->side_data));
2921 if (!ost->st->side_data)
2922 return AVERROR(ENOMEM);
2924 ost->st->nb_side_data = 0;
2925 for (j = 0; j < ist->st->nb_side_data; j++) {
2926 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2927 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2929 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2932 sd_dst->data = av_malloc(sd_src->size);
2934 return AVERROR(ENOMEM);
2935 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2936 sd_dst->size = sd_src->size;
2937 sd_dst->type = sd_src->type;
2938 ost->st->nb_side_data++;
2942 ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copy for the stream-copy case */
2944 switch (enc_ctx->codec_type) {
2945 case AVMEDIA_TYPE_AUDIO:
2946 if (audio_volume != 256) {
2947 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2950 enc_ctx->channel_layout = dec_ctx->channel_layout;
2951 enc_ctx->sample_rate = dec_ctx->sample_rate;
2952 enc_ctx->channels = dec_ctx->channels;
2953 enc_ctx->frame_size = dec_ctx->frame_size;
2954 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2955 enc_ctx->block_align = dec_ctx->block_align;
2956 enc_ctx->initial_padding = dec_ctx->delay;
2957 #if FF_API_AUDIOENC_DELAY
2958 enc_ctx->delay = dec_ctx->delay;
2960 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2961 enc_ctx->block_align= 0;
2962 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2963 enc_ctx->block_align= 0;
2965 case AVMEDIA_TYPE_VIDEO:
2966 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2967 enc_ctx->width = dec_ctx->width;
2968 enc_ctx->height = dec_ctx->height;
2969 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2970 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2972 av_mul_q(ost->frame_aspect_ratio,
2973 (AVRational){ enc_ctx->height, enc_ctx->width });
2974 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2975 "with stream copy may produce invalid files\n");
2977 else if (ist->st->sample_aspect_ratio.num)
2978 sar = ist->st->sample_aspect_ratio;
2980 sar = dec_ctx->sample_aspect_ratio;
2981 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2982 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2983 ost->st->r_frame_rate = ist->st->r_frame_rate;
2985 case AVMEDIA_TYPE_SUBTITLE:
2986 enc_ctx->width = dec_ctx->width;
2987 enc_ctx->height = dec_ctx->height;
2989 case AVMEDIA_TYPE_UNKNOWN:
2990 case AVMEDIA_TYPE_DATA:
2991 case AVMEDIA_TYPE_ATTACHMENT:
/* encoding path: find the encoder and build a simple filtergraph if needed */
2998 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3000 /* should only happen when a default codec is not present. */
3001 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3002 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3003 ret = AVERROR(EINVAL);
3007 set_encoder_id(output_files[ost->file_index], ost);
3010 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3011 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3013 fg = init_simple_filtergraph(ist, ost);
3014 if (configure_filtergraph(fg)) {
3015 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* choose an output frame rate: filter > -r/input > r_frame_rate > 25 fps */
3020 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3021 if (!ost->frame_rate.num)
3022 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3023 if (ist && !ost->frame_rate.num)
3024 ost->frame_rate = ist->framerate;
3025 if (ist && !ost->frame_rate.num)
3026 ost->frame_rate = ist->st->r_frame_rate;
3027 if (ist && !ost->frame_rate.num) {
3028 ost->frame_rate = (AVRational){25, 1};
3029 av_log(NULL, AV_LOG_WARNING,
3031 "about the input framerate is available. Falling "
3032 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3033 "if you want a different framerate.\n",
3034 ost->file_index, ost->index);
3036 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3037 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3038 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3039 ost->frame_rate = ost->enc->supported_framerates[idx];
3041 // reduce frame rate for mpeg4 to be within the spec limits
3042 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3043 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3044 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* per-media-type parameter setup for the encoding case */
3048 switch (enc_ctx->codec_type) {
3049 case AVMEDIA_TYPE_AUDIO:
3050 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3051 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3052 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3053 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3054 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3056 case AVMEDIA_TYPE_VIDEO:
3057 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3058 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3059 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3060 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3061 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3062 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3063 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3065 for (j = 0; j < ost->forced_kf_count; j++)
3066 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3068 enc_ctx->time_base);
3070 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3071 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3072 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3073 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3074 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3075 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3076 if (!strncmp(ost->enc->name, "libx264", 7) &&
3077 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3078 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3079 av_log(NULL, AV_LOG_WARNING,
3080 "No pixel format specified, %s for H.264 encoding chosen.\n"
3081 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3082 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3083 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3084 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3085 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3086 av_log(NULL, AV_LOG_WARNING,
3087 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3088 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3089 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3090 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3092 ost->st->avg_frame_rate = ost->frame_rate;
3095 enc_ctx->width != dec_ctx->width ||
3096 enc_ctx->height != dec_ctx->height ||
3097 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3098 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" expressions, "source", or a static list */
3101 if (ost->forced_keyframes) {
3102 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3103 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3104 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3106 av_log(NULL, AV_LOG_ERROR,
3107 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3110 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3111 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3112 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3113 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3115 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3116 // parse it only for static kf timings
3117 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3118 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3122 case AVMEDIA_TYPE_SUBTITLE:
3123 enc_ctx->time_base = (AVRational){1, 1000};
3124 if (!enc_ctx->width) {
3125 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3126 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3129 case AVMEDIA_TYPE_DATA:
/* apply a user-specified -disposition via a throwaway AVOption flags table */
3137 if (ost->disposition) {
3138 static const AVOption opts[] = {
3139 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3140 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3141 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3142 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3143 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3144 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3145 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3146 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3147 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3148 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3149 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3150 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3151 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3152 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3155 static const AVClass class = {
3157 .item_name = av_default_item_name,
3159 .version = LIBAVUTIL_VERSION_INT,
3161 const AVClass *pclass = &class;
3163 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3169 /* open each encoder */
3170 for (i = 0; i < nb_output_streams; i++) {
3171 ret = init_output_stream(output_streams[i], error, sizeof(error));
3176 /* init input streams */
3177 for (i = 0; i < nb_input_streams; i++)
3178 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3179 for (i = 0; i < nb_output_streams; i++) {
3180 ost = output_streams[i];
3181 avcodec_close(ost->enc_ctx);
3186 /* discard unused programs */
3187 for (i = 0; i < nb_input_files; i++) {
3188 InputFile *ifile = input_files[i];
3189 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3190 AVProgram *p = ifile->ctx->programs[j];
3191 int discard = AVDISCARD_ALL;
3193 for (k = 0; k < p->nb_stream_indexes; k++)
3194 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3195 discard = AVDISCARD_DEFAULT;
3198 p->discard = discard;
3202 /* open files and write file headers */
3203 for (i = 0; i < nb_output_files; i++) {
3204 oc = output_files[i]->ctx;
3205 oc->interrupt_callback = int_cb;
3206 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3207 snprintf(error, sizeof(error),
3208 "Could not write header for output file #%d "
3209 "(incorrect codec parameters ?): %s",
3210 i, av_err2str(ret));
3211 ret = AVERROR(EINVAL);
3214 // assert_avoptions(output_files[i]->opts);
3215 if (strcmp(oc->oformat->name, "rtp")) {
3221 /* dump the file output parameters - cannot be done before in case
3223 for (i = 0; i < nb_output_files; i++) {
3224 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3227 /* dump the stream mapping */
3228 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3229 for (i = 0; i < nb_input_streams; i++) {
3230 ist = input_streams[i];
3232 for (j = 0; j < ist->nb_filters; j++) {
3233 if (ist->filters[j]->graph->graph_desc) {
3234 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3235 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3236 ist->filters[j]->name);
3237 if (nb_filtergraphs > 1)
3238 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3239 av_log(NULL, AV_LOG_INFO, "\n");
3244 for (i = 0; i < nb_output_streams; i++) {
3245 ost = output_streams[i];
3247 if (ost->attachment_filename) {
3248 /* an attached file */
3249 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3250 ost->attachment_filename, ost->file_index, ost->index);
3254 if (ost->filter && ost->filter->graph->graph_desc) {
3255 /* output from a complex graph */
3256 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3257 if (nb_filtergraphs > 1)
3258 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3260 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3261 ost->index, ost->enc ? ost->enc->name : "?");
3265 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3266 input_streams[ost->source_index]->file_index,
3267 input_streams[ost->source_index]->st->index,
3270 if (ost->sync_ist != input_streams[ost->source_index])
3271 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3272 ost->sync_ist->file_index,
3273 ost->sync_ist->st->index);
3274 if (ost->stream_copy)
3275 av_log(NULL, AV_LOG_INFO, " (copy)");
/* show "native" when the codec name and decoder/encoder name coincide */
3277 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3278 const AVCodec *out_codec = ost->enc;
3279 const char *decoder_name = "?";
3280 const char *in_codec_name = "?";
3281 const char *encoder_name = "?";
3282 const char *out_codec_name = "?";
3283 const AVCodecDescriptor *desc;
3286 decoder_name = in_codec->name;
3287 desc = avcodec_descriptor_get(in_codec->id);
3289 in_codec_name = desc->name;
3290 if (!strcmp(decoder_name, in_codec_name))
3291 decoder_name = "native";
3295 encoder_name = out_codec->name;
3296 desc = avcodec_descriptor_get(out_codec->id);
3298 out_codec_name = desc->name;
3299 if (!strcmp(encoder_name, out_codec_name))
3300 encoder_name = "native";
3303 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3304 in_codec_name, decoder_name,
3305 out_codec_name, encoder_name);
3307 av_log(NULL, AV_LOG_INFO, "\n");
3311 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3315 if (sdp_filename || want_sdp) {
3319 transcode_init_done = 1;
3324 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3325 static int need_output(void)
3329 for (i = 0; i < nb_output_streams; i++) {
3330 OutputStream *ost = output_streams[i];
3331 OutputFile *of = output_files[ost->file_index];
3332 AVFormatContext *os = output_files[ost->file_index]->ctx;
3334 if (ost->finished ||
3335 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3337 if (ost->frame_number >= ost->max_frames) {
3339 for (j = 0; j < of->ctx->nb_streams; j++)
3340 close_output_stream(output_streams[of->ost_index + j]);
3351 * Select the output stream to process.
3353 * @return selected output stream, or NULL if none available
3355 static OutputStream *choose_output(void)
3358 int64_t opts_min = INT64_MAX;
3359 OutputStream *ost_min = NULL;
3361 for (i = 0; i < nb_output_streams; i++) {
3362 OutputStream *ost = output_streams[i];
3363 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3365 if (!ost->finished && opts < opts_min) {
3367 ost_min = ost->unavailable ? NULL : ost;
/* Handle interactive keyboard commands on stdin (q, +, -, s, c/C, d/D, h, ?).
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 * NOTE(review): extracted listing — lines are elided between the numbered
 * statements; some branches and returns are not visible here. */
3373 static int check_keyboard_interaction(int64_t cur_time)
3376 static int64_t last_time;
3377 if (received_nb_signals)
3378 return AVERROR_EXIT;
3379 /* read_key() returns 0 on EOF */
/* Poll the keyboard at most every 100000 us (0.1 s), and never in
 * daemon mode where there is no controlling terminal. */
3380 if(cur_time - last_time >= 100000 && !run_as_daemon){
3382 last_time = cur_time;
3386 return AVERROR_EXIT;
/* '+'/'-' raise/lower the log level in steps of 10 (one AV_LOG_* level). */
3387 if (key == '+') av_log_set_level(av_log_get_level()+10);
3388 if (key == '-') av_log_set_level(av_log_get_level()-10);
3389 if (key == 's') qp_hist ^= 1;
3392 do_hex_dump = do_pkt_dump = 0;
3393 } else if(do_pkt_dump){
3397 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line from stdin and forward it to the
 * filtergraphs ('c' = first matching filter only, 'C' = all matching). */
3399 if (key == 'c' || key == 'C'){
3400 char buf[4096], target[64], command[256], arg[256] = {0};
3403 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3405 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
/* Parse "<target> <time> <command> [<argument>]"; at least 3 fields. */
3410 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3411 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3412 target, time, command, arg);
3413 for (i = 0; i < nb_filtergraphs; i++) {
3414 FilterGraph *fg = filtergraphs[i];
3417 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3418 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3419 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3420 } else if (key == 'c') {
3421 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3422 ret = AVERROR_PATCHWELCOME;
/* Timed commands (time != -1) are queued for later execution. */
3424 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3426 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3431 av_log(NULL, AV_LOG_ERROR,
3432 "Parse error, at least 3 arguments were expected, "
3433 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle (or read) the codec debug flags. */
3436 if (key == 'd' || key == 'D'){
3439 debug = input_streams[0]->st->codec->debug<<1;
3440 if(!debug) debug = 1;
/* Skip visualization flags that the shifting could land on. */
3441 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3447 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3451 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3452 fprintf(stderr,"error parsing debug value\n")
/* Apply the chosen debug flags to every decoder and encoder context. */
3454 for(i=0;i<nb_input_streams;i++) {
3455 input_streams[i]->st->codec->debug = debug;
3457 for(i=0;i<nb_output_streams;i++) {
3458 OutputStream *ost = output_streams[i];
3459 ost->enc_ctx->debug = debug;
3461 if(debug) av_log_set_level(AV_LOG_DEBUG);
3462 fprintf(stderr,"debug=%d\n", debug);
/* '?' / unknown key: print the interactive help. */
3465 fprintf(stderr, "key function\n"
3466 "? show this help\n"
3467 "+ increase verbosity\n"
3468 "- decrease verbosity\n"
3469 "c Send command to first matching filter supporting it\n"
3470 "C Send/Que command to all matching filters\n"
3471 "D cycle through available debug modes\n"
3472 "h dump packets/hex press to cycle through the 3 states\n"
3474 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.
 * NOTE(review): extracted listing — lines elided between numbered lines. */
3481 static void *input_thread(void *arg)
/* Non-seekable (live) inputs use a non-blocking queue send so the demuxer
 * is not stalled by a full queue. */
3484 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3489 ret = av_read_frame(f->ctx, &pkt);
3491 if (ret == AVERROR(EAGAIN)) {
/* Propagate the read error/EOF to the receiving side of the queue. */
3496 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate packet data so it survives past the next av_read_frame(). */
3499 av_dup_packet(&pkt);
3500 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once, then retry blocking. */
3501 if (flags && ret == AVERROR(EAGAIN)) {
3503 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3504 av_log(f->ctx, AV_LOG_WARNING,
3505 "Thread message queue blocking; consider raising the "
3506 "thread_queue_size option (current value: %d)\n",
3507 f->thread_queue_size);
3510 if (ret != AVERROR_EOF)
3511 av_log(f->ctx, AV_LOG_ERROR,
3512 "Unable to send packet to main thread: %s\n",
3514 av_free_packet(&pkt);
3515 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads and free their message queues. */
3523 static void free_input_threads(void)
3527 for (i = 0; i < nb_input_files; i++) {
3528 InputFile *f = input_files[i];
3531 if (!f || !f->in_thread_queue)
/* Tell the sender side to stop, then drain any queued packets so the
 * thread is not blocked on a full queue before joining it. */
3533 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3534 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3535 av_free_packet(&pkt);
3537 pthread_join(f->thread, NULL);
3539 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread (plus message queue) per input file.
 * With a single input file no threads are needed. Returns 0 or AVERROR. */
3543 static int init_input_threads(void)
3547 if (nb_input_files == 1)
3550 for (i = 0; i < nb_input_files; i++) {
3551 InputFile *f = input_files[i];
/* Treat non-seekable inputs (except lavfi) as live: use non-blocking
 * queue sends so reading is never stalled by the consumer. */
3553 if (f->ctx->pb ? !f->ctx->pb->seekable :
3554 strcmp(f->ctx->iformat->name, "lavfi"))
3555 f->non_blocking = 1;
3556 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3557 f->thread_queue_size, sizeof(AVPacket));
3561 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3562 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3563 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, hence AVERROR(ret). */
3564 return AVERROR(ret);
/* Multi-threaded variant of get_input_packet(): pull the next packet from
 * the input file's thread message queue (non-blocking for live inputs). */
3570 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3572 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3574 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file, throttling rate-emulated
 * (-re) inputs and dispatching to the threaded reader when several
 * input files are open. */
3578 static int get_input_packet(InputFile *f, AVPacket *pkt)
3582 for (i = 0; i < f->nb_streams; i++) {
3583 InputStream *ist = input_streams[f->ist_index + i];
/* Rate emulation: compare the stream's dts (in microseconds) against
 * wall-clock time elapsed since the stream started. */
3584 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3585 int64_t now = av_gettime_relative() - ist->start;
/* Too early to release the next packet — tell the caller to retry. */
3587 return AVERROR(EAGAIN);
3592 if (nb_input_files > 1)
3593 return get_input_packet_mt(f, pkt);
3595 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input gave EAGAIN during the last step). */
3598 static int got_eagain(void)
3601 for (i = 0; i < nb_output_streams; i++)
3602 if (output_streams[i]->unavailable)
/* Clear the EAGAIN markers on all input files and output streams so the
 * next transcoding step retries everything. */
3607 static void reset_eagain(void)
3610 for (i = 0; i < nb_input_files; i++)
3611 input_files[i]->eagain = 0;
3612 for (i = 0; i < nb_output_streams; i++)
3613 output_streams[i]->unavailable = 0;
3618 * - 0 -- one packet was read and processed
3619 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3620 * this function should be called again
3621 * - AVERROR_EOF -- this function should not be called again
/* Read and process exactly one packet from input file file_index:
 * timestamp fixups (wrap correction, ts_offset, ts_scale, discontinuity
 * handling), side-data propagation, then hand off to process_input_packet().
 * NOTE(review): extracted listing — lines are elided between the numbered
 * statements; error paths and some braces are not visible here. */
3623 static int process_input(int file_index)
3625 InputFile *ifile = input_files[file_index];
3626 AVFormatContext *is;
3632 ret = get_input_packet(ifile, &pkt);
3634 if (ret == AVERROR(EAGAIN)) {
3639 if (ret != AVERROR_EOF) {
3640 print_error(is->filename, ret);
/* EOF on this file: flush every decoder that needs it and finish the
 * output streams fed directly (stream copy / subtitles) by its streams. */
3645 for (i = 0; i < ifile->nb_streams; i++) {
3646 ist = input_streams[ifile->ist_index + i];
3647 if (ist->decoding_needed) {
3648 ret = process_input_packet(ist, NULL);
3653 /* mark all outputs that don't go through lavfi as finished */
3654 for (j = 0; j < nb_output_streams; j++) {
3655 OutputStream *ost = output_streams[j];
3657 if (ost->source_index == ifile->ist_index + i &&
3658 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3659 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the main loop re-selects another input. */
3663 ifile->eof_reached = 1;
3664 return AVERROR(EAGAIN);
3670 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3671 is->streams[pkt.stream_index]);
3673 /* the following test is needed in case new streams appear
3674 dynamically in stream : we ignore them */
3675 if (pkt.stream_index >= ifile->nb_streams) {
3676 report_new_stream(file_index, &pkt);
3677 goto discard_packet;
3680 ist = input_streams[ifile->ist_index + pkt.stream_index];
3682 ist->data_size += pkt.size;
3686 goto discard_packet;
/* Debug trace of raw demuxed timestamps before any correction. */
3689 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3690 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3691 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3692 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3693 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3694 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3695 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3696 av_ts2str(input_files[ist->file_index]->ts_offset),
3697 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wraparound correction for streams with < 64 pts_wrap_bits. */
3700 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3701 int64_t stime, stime2;
3702 // Correcting starttime based on the enabled streams
3703 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3704 // so we instead do it here as part of discontinuity handling
3705 if ( ist->next_dts == AV_NOPTS_VALUE
3706 && ifile->ts_offset == -is->start_time
3707 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3708 int64_t new_start_time = INT64_MAX;
/* Recompute the effective start time over the non-discarded streams. */
3709 for (i=0; i<is->nb_streams; i++) {
3710 AVStream *st = is->streams[i];
3711 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3713 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3715 if (new_start_time > is->start_time) {
3716 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3717 ifile->ts_offset = -new_start_time;
3721 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3722 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3723 ist->wrap_correction_done = 1;
/* Timestamps beyond half the wrap range are unwrapped by one period;
 * correction stays pending until both dts and pts are in range. */
3725 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3726 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3727 ist->wrap_correction_done = 0;
3729 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3730 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3731 ist->wrap_correction_done = 0;
3735 /* add the stream-global side data to the first packet */
3736 if (ist->nb_packets == 1) {
3737 if (ist->st->nb_side_data)
3738 av_packet_split_side_data(&pkt);
3739 for (i = 0; i < ist->st->nb_side_data; i++) {
3740 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not overwrite side data the packet already carries. */
3743 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is consumed by autorotation instead of being copied. */
3745 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3748 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3752 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated file-level timestamp offset, then -itsscale. */
3756 if (pkt.dts != AV_NOPTS_VALUE)
3757 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3758 if (pkt.pts != AV_NOPTS_VALUE)
3759 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3761 if (pkt.pts != AV_NOPTS_VALUE)
3762 pkt.pts *= ist->ts_scale;
3763 if (pkt.dts != AV_NOPTS_VALUE)
3764 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first dts of this stream compared
 * against the last timestamp seen anywhere in the file. */
3766 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3767 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3768 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3769 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3770 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3771 int64_t delta = pkt_dts - ifile->last_ts;
3772 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3773 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3774 ifile->ts_offset -= delta;
3775 av_log(NULL, AV_LOG_DEBUG,
3776 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3777 delta, ifile->ts_offset);
3778 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3779 if (pkt.pts != AV_NOPTS_VALUE)
3780 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Intra-stream discontinuity check against the predicted next dts. */
3784 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3785 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3786 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3788 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3789 int64_t delta = pkt_dts - ist->next_dts;
3790 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3791 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3792 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3793 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3794 ifile->ts_offset -= delta;
3795 av_log(NULL, AV_LOG_DEBUG,
3796 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3797 delta, ifile->ts_offset);
3798 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3799 if (pkt.pts != AV_NOPTS_VALUE)
3800 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-discontinuity formats: drop timestamps beyond the error threshold. */
3803 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3804 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3805 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3806 pkt.dts = AV_NOPTS_VALUE;
3808 if (pkt.pts != AV_NOPTS_VALUE){
3809 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3810 delta = pkt_pts - ist->next_dts;
3811 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3812 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3813 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3814 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of the file for inter-stream discontinuities. */
3820 if (pkt.dts != AV_NOPTS_VALUE)
3821 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug trace of the timestamps after all corrections. */
3824 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3825 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3826 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3827 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3828 av_ts2str(input_files[ist->file_index]->ts_offset),
3829 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3832 sub2video_heartbeat(ist, pkt.pts);
3834 process_input_packet(ist, &pkt);
3837 av_free_packet(&pkt);
3843 * Perform a step of transcoding for the specified filter graph.
3845 * @param[in] graph filter graph to consider
3846 * @param[out] best_ist input stream where a frame would allow to continue
3847 * @return 0 for success, <0 for error
3849 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3852 int nb_requests, nb_requests_max = 0;
3853 InputFilter *ifilter;
/* Ask the graph to produce output from already-buffered frames first. */
3857 ret = avfilter_graph_request_oldest(graph->graph);
3859 return reap_filters(0);
/* Graph EOF: flush the sinks and close every connected output stream. */
3861 if (ret == AVERROR_EOF) {
3862 ret = reap_filters(1);
3863 for (i = 0; i < graph->nb_outputs; i++)
3864 close_output_stream(graph->outputs[i]->ost);
3867 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs more input. Pick the input stream whose
 * buffersrc has the highest failed-request count (the most starved),
 * skipping files that are at EOF or themselves in EAGAIN. */
3870 for (i = 0; i < graph->nb_inputs; i++) {
3871 ifilter = graph->inputs[i];
3873 if (input_files[ist->file_index]->eagain ||
3874 input_files[ist->file_index]->eof_reached)
3876 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3877 if (nb_requests > nb_requests_max) {
3878 nb_requests_max = nb_requests;
/* No feedable input found: mark the graph's outputs unavailable. */
3884 for (i = 0; i < graph->nb_outputs; i++)
3885 graph->outputs[i]->ost->unavailable = 1;
3891 * Run a single step of transcoding.
3893 * @return 0 for success, <0 for error
3895 static int transcode_step(void)
3901 ost = choose_output();
3908 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered streams: let the filtergraph choose which input to feed. */
3913 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered (e.g. stream copy) streams read from their source input. */
3918 av_assert0(ost->source_index >= 0);
3919 ist = input_streams[ost->source_index];
3922 ret = process_input(ist->file_index);
3923 if (ret == AVERROR(EAGAIN)) {
3924 if (input_files[ist->file_index]->eagain)
3925 ost->unavailable = 1;
/* EOF of one input is not an error for the overall step. */
3930 return ret == AVERROR_EOF ? 0 : ret;
3932 return reap_filters(0);
3936 * The following code is the main loop of the file converter
/* NOTE(review): extracted listing — lines are elided between the numbered
 * statements; labels, error paths and braces are not all visible. */
3938 static int transcode(void)
3941 AVFormatContext *os;
3944 int64_t timer_start;
3946 ret = transcode_init();
3950 if (stdin_interaction) {
3951 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3954 timer_start = av_gettime_relative();
3957 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until q/signal/EOF. */
3961 while (!received_sigterm) {
3962 int64_t cur_time= av_gettime_relative();
3964 /* if 'q' pressed, exits */
3965 if (stdin_interaction)
3966 if (check_keyboard_interaction(cur_time) < 0)
3969 /* check if there's any stream where output is still needed */
3970 if (!need_output()) {
3971 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3975 ret = transcode_step();
3977 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3981 av_strerror(ret, errbuf, sizeof(errbuf));
3983 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3988 /* dump report by using the output first video and audio streams */
3989 print_report(0, timer_start, cur_time);
3992 free_input_threads();
3995 /* at the end of stream, we must flush the decoder buffers */
3996 for (i = 0; i < nb_input_streams; i++) {
3997 ist = input_streams[i];
3998 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3999 process_input_packet(ist, NULL);
4006 /* write the trailer if needed and close file */
4007 for (i = 0; i < nb_output_files; i++) {
4008 os = output_files[i]->ctx;
4009 av_write_trailer(os);
4012 /* dump report by using the first video and audio streams */
4013 print_report(1, timer_start, av_gettime_relative());
4015 /* close each encoder */
4016 for (i = 0; i < nb_output_streams; i++) {
4017 ost = output_streams[i];
4018 if (ost->encoding_needed) {
4019 av_freep(&ost->enc_ctx->stats_in);
4023 /* close each decoder */
4024 for (i = 0; i < nb_input_streams; i++) {
4025 ist = input_streams[i];
4026 if (ist->decoding_needed) {
4027 avcodec_close(ist->dec_ctx);
4028 if (ist->hwaccel_uninit)
4029 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (also reached on error): free threads and per-stream
 * dynamically allocated option/state storage. */
4038 free_input_threads();
4041 if (output_streams) {
4042 for (i = 0; i < nb_output_streams; i++) {
4043 ost = output_streams[i];
4046 fclose(ost->logfile);
4047 ost->logfile = NULL;
4049 av_freep(&ost->forced_kf_pts);
4050 av_freep(&ost->apad);
4051 av_freep(&ost->disposition);
4052 av_dict_free(&ost->encoder_opts);
4053 av_dict_free(&ost->sws_dict);
4054 av_dict_free(&ost->swr_opts);
4055 av_dict_free(&ost->resample_opts);
4056 av_dict_free(&ost->bsf_args);
/* Return the process's consumed user CPU time in microseconds, using
 * getrusage() on POSIX, GetProcessTimes() on Windows, and falling back
 * to a monotonic wall clock where neither is available. */
4064 static int64_t getutime(void)
4067 struct rusage rusage;
4069 getrusage(RUSAGE_SELF, &rusage);
4070 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4071 #elif HAVE_GETPROCESSTIMES
4073 FILETIME c, e, k, u;
4074 proc = GetCurrentProcess();
4075 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 to get microseconds. */
4076 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4078 return av_gettime_relative();
/* Return the peak memory usage of the process in bytes (0 where no
 * platform API is available). */
4082 static int64_t getmaxrss(void)
4084 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4085 struct rusage rusage;
4086 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes. */
4087 return (int64_t)rusage.ru_maxrss * 1024;
4088 #elif HAVE_GETPROCESSMEMORYINFO
4090 PROCESS_MEMORY_COUNTERS memcounters;
4091 proc = GetCurrentProcess();
4092 memcounters.cb = sizeof(memcounters);
4093 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4094 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in daemon ('-d') mode to silence output.
 * NOTE(review): body elided in this extract — presumably empty; confirm
 * against the full source. */
4100 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse options, run transcode()
 * and report timing/error statistics.
 * NOTE(review): extracted listing — lines elided between numbered lines. */
4104 int main(int argc, char **argv)
4109 register_exit(ffmpeg_cleanup);
4111 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4113 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4114 parse_loglevel(argc, argv, options);
/* "-d" as first argument: daemon mode, logging is silenced. */
4116 if(argc>1 && !strcmp(argv[1], "-d")){
4118 av_log_set_callback(log_callback_null);
/* Register all codecs/devices/filters/formats before option parsing. */
4123 avcodec_register_all();
4125 avdevice_register_all();
4127 avfilter_register_all();
4129 avformat_network_init();
4131 show_banner(argc, argv, options);
4135 /* parse options and open all input/output files */
4136 ret = ffmpeg_parse_options(argc, argv);
4140 if (nb_output_files <= 0 && nb_input_files == 0) {
4142 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4146 /* file converter / grab */
4147 if (nb_output_files <= 0) {
4148 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4152 // if (nb_input_files == 0) {
4153 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Measure user CPU time consumed by the whole transcode run. */
4157 current_time = ti = getutime();
4158 if (transcode() < 0)
4160 ti = getutime() - ti;
4162 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4164 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4165 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the decode error ratio exceeds -max_error_rate. */
4166 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 when terminated by signals. */
4169 exit_program(received_nb_signals ? 255 : main_return_code);
4170 return main_return_code;