2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils' banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Lazily opened log file for per-frame video statistics (-vstats). */
static FILE *vstats_file;

/* Constant names usable in -force_key_frames expressions.
   NOTE(review): initializer contents elided in this excerpt. */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in the file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;
static int nb_frames_dup = 0;          /* frames duplicated for CFR sync */
static int nb_frames_drop = 0;         /* frames dropped for sync */
static int64_t decode_error_stat[2];   /* [0]=ok count, [1]=error count — presumably; confirm at use sites */

static int current_time;               /* benchmark reference time, set elsewhere */
AVIOContext *progress_avio = NULL;     /* destination of -progress reports */

static uint8_t *subtitle_out;          /* reusable buffer for encoded subtitles */

/* Global stream/file tables shared with ffmpeg_opt.c / ffmpeg_filter.c. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;          /* saved terminal state to restore on exit */
static int restore_tty;                /* nonzero once oldtty has been captured */

static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Reset ist->sub2video.frame and (re)allocate it as a fully transparent
 * RGB32 canvas sized from the decoder (falling back to the configured
 * sub2video w/h). Returns <0 on allocation failure.
 * NOTE(review): this excerpt elides some original lines (braces/returns).
 */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer the decoder's dimensions; fall back to the pre-configured size. */
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* Zero-fill = fully transparent black in pre-multiplied RGB32. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas, w/h its dimensions; rejects
 * non-bitmap rectangles and rectangles that fall outside the canvas.
 * NOTE(review): this excerpt elides some original lines (params/braces).
 */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
    /* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];
    pal = (uint32_t *)r->pict.data[1];   /* palette: index -> RGB32 value */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* Expand each palette index into a full RGB32 pixel. */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->pict.linesize[0];
/*
 * Stamp the current sub2video canvas with pts and push a kept reference of
 * it into every filtergraph input fed by this stream.
 * NOTE(review): this excerpt elides some original lines (braces).
 */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);   /* canvas must already be allocated */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        /* KEEP_REF: the same frame is re-sent on later heartbeats. */
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/*
 * Render a decoded AVSubtitle (or a blank canvas when sub is NULL) onto the
 * sub2video frame and push it into the filtergraphs. Computes the display
 * pts/end_pts in the stream time base from the subtitle's display times.
 * NOTE(review): this excerpt elides some original lines (NULL-sub branch,
 * braces, returns).
 */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE). */
    pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                       AV_TIME_BASE_Q, ist->st->time_base);
    end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    /* sub == NULL path: reuse the previous end pts for the blank frame. */
    pts = ist->sub2video.end_pts;

    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/*
 * Keep sub2video filtergraph inputs fed while other streams of the same
 * file advance, so filters (e.g. overlay) are never starved waiting for a
 * subtitle frame.
 * NOTE(review): this excerpt elides some original lines (continue/braces).
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)   /* not a sub2video stream */
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* Expired or never-drawn canvas: refresh it (NULL = blank/clear). */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* Only re-push if some downstream filter actually asked for input. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        sub2video_push_ref(ist2, pts2);
/*
 * End-of-stream handling for a sub2video stream: clear any still-displayed
 * subtitle, then send EOF (NULL frame) to all its filtergraph inputs.
 * NOTE(review): this excerpt elides the opening/closing braces.
 */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)   /* a subtitle is still showing */
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);  /* EOF marker */
298 /* end of sub2video hack */
/*
 * Async-signal-safe terminal restoration: put the tty back into the state
 * saved in oldtty. Called from signal handlers, hence the _sigsafe name —
 * only signal-safe operations are allowed here.
 * NOTE(review): this excerpt elides the guard on restore_tty and braces.
 */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);

    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. volatile: written from signal handlers and read
   from the main loop. */
static volatile int received_sigterm = 0;       /* last terminating signal number */
static volatile int received_nb_signals = 0;    /* how many signals arrived */
static volatile int transcode_init_done = 0;    /* set once init completed */
static volatile int ffmpeg_exited = 0;          /* set when cleanup finished */
static int main_return_code = 0;                /* process exit status */
/*
 * POSIX signal handler: record the signal and request shutdown; after more
 * than three signals, hard-exit with a raw write(2) (async-signal-safe).
 * NOTE(review): this excerpt elides the return type line, the hard-exit
 * call and braces.
 */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        /* write(2) is async-signal-safe, unlike stdio/av_log. */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler so both platforms share one shutdown path.
 * NOTE(review): this excerpt elides switch/case scaffolding, returns and
 * braces.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* NOTE(review): interior of term_init(); the function header and several
       lines are elided in this excerpt. Puts the tty into raw-ish mode and
       installs signal/console handlers. */
    if (tcgetattr (0, &tty) == 0) {
        /* Disable input translation/flow control so single keys arrive raw. */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        /* No echo, no canonical (line-buffered) mode. */
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);

        tcsetattr (0, TCSANOW, &tty);

    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    signal(SIGXCPU, sigterm_handler); /* CPU-time limit exceeded */

#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/*
 * Poll stdin for a single keypress. POSIX path uses select(2) with a zero
 * timeout; Windows path peeks the console/pipe handle.
 * NOTE(review): this excerpt elides most of the function body (fd_set
 * setup, read(2), Windows branches, returns).
 */
static int read_key(void)
    /* Zero-timeout select: returns immediately whether or not input waits. */
    n = select(1, &rfds, NULL, NULL, &tv);

# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;   /* cached stdin handle (Windows) */
    input_handle = GetStdHandle(STD_INPUT_HANDLE);
    is_pipe = !GetConsoleMode(input_handle, &dw);  /* no console mode => pipe */

    /* When running under a GUI, you will end here. */
    if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
        // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Tear down every global allocation before exit: filtergraphs, output
 * files/streams, input files/streams, then the option tables and network.
 * ret is the intended exit status; used only for the final log message.
 * NOTE(review): this excerpt elides many original lines (closing braces,
 * benchmark/exited-flag guards); code is kept verbatim.
 */
static void ffmpeg_cleanup(int ret)
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* Free filtergraphs, including per-input/per-output names. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* Close output files: only close the pb for formats that own a file. */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);

    /* Free per-output-stream state: bitstream filter chain, frames, parser,
       keyframe forcing, filters, channel maps, encoder context. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;
        bsfc = ost->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);

    free_input_threads();   /* must precede closing the input files */

    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);

    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);

    av_freep(&vstats_filename);

    /* Finally drop the global tables themselves. */
    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Abort with a fatal log if any option in m was left unconsumed, i.e. the
 * user passed an option no component recognized.
 * NOTE(review): this excerpt elides the exit call and closing braces.
 */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/*
 * -benchmark_all support: when fmt is non-NULL, log the CPU time spent
 * since the previous call under the printf-formatted label; then reset the
 * reference time. A NULL fmt only resets the reference.
 * NOTE(review): this excerpt elides buffer/va_list setup and braces.
 */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();   /* current user CPU time */
        vsnprintf(buf, sizeof(buf), fmt, va);
        av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Final packet path to the muxer: copy encoder extradata to the stream,
 * apply -vsync drop / async pts clearing, enforce -frames limits for
 * non-video, collect quality/PSNR side data, run the bitstream filter
 * chain, sanitize dts/pts monotonicity, then interleave-write.
 * NOTE(review): this excerpt elides many original lines (braces, returns,
 * error paths); code is kept verbatim.
 */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* Propagate encoder extradata to the muxing codec context once. */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;

    /* -vsync drop / negative -async: discard timestamps entirely. */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Pull encoder quality/error stats out of packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            ost->error[i] = AV_RL64(sd + 8 + 8*i);

    av_packet_split_side_data(pkt);

    /* Run the legacy bitstream-filter chain; each filter may replace the
       packet payload. */
        AVPacket new_pkt = *pkt;
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if(a == 0 && new_pkt.data != pkt->data) {
            uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
            memcpy(t, new_pkt.data, new_pkt.size);
            memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

        pkt->side_data = NULL;
        pkt->side_data_elems = 0;

        /* Wrap the filtered buffer so it is freed with the packet. */
        new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                       av_buffer_default_free, NULL, 0);

        av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
               bsfc->filter->name, pkt->stream_index,
               avctx->codec ? avctx->codec->name : "copy");

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; replace an invalid dts with a guess
           (median of pts, dts and last_mux_dts+1). */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        /* Enforce (strictly, unless TS_NONSTRICT) increasing dts. */
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
         pkt->dts != AV_NOPTS_VALUE &&
         ost->last_mux_dts != AV_NOPTS_VALUE) {
        int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
        if (pkt->dts < max) {
            int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
            av_log(s, loglevel, "Non-monotonous DTS in output stream "
                   "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                   ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                av_log(NULL, AV_LOG_FATAL, "aborting.\n");
            av_log(s, loglevel, "changing to %"PRId64". This may result "
                   "in incorrect timestamps in the output file.\n",
            if(pkt->pts >= pkt->dts)
                pkt->pts = FFMAX(pkt->pts, max);

    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
           av_get_media_type_string(ost->enc_ctx->codec_type),
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* A muxer failure poisons the whole file: stop all streams. */
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/*
 * Mark the encoder side of ost finished and, when applicable (guard line
 * elided in this excerpt — presumably the -shortest handling), clamp the
 * output file's recording time to this stream's end time.
 */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        /* Stream end in AV_TIME_BASE units, relative to first_pts. */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/*
 * -t enforcement: if the stream's current position has reached the output
 * file's recording_time, close the stream. Return value lines are elided
 * in this excerpt (returns 0 when the limit is hit, nonzero otherwise —
 * callers treat nonzero as "keep encoding").
 */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/*
 * Encode one audio frame and hand the resulting packet(s) to write_frame().
 * Fixes up missing frame pts from sync_opts and advances sync_opts by the
 * frame's sample count.
 * NOTE(review): this excerpt elides some original lines (params, braces,
 * got_packet guard).
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))   /* -t limit reached: skip encoding */

    /* No pts from the filtergraph (or -async < 0): synthesize from the
       running sample counter. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);   /* reset benchmark reference before encoding */

    av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
           "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
           enc->time_base.num, enc->time_base.den);

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");

    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

    /* Packet timestamps: encoder time base -> stream time base. */
    av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

    av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

    write_frame(s, &pkt, ost);
/*
 * Encode an AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (draw packet + clear packet, hence nb = 2 — the assignment line is
 * elided in this excerpt). Display times are normalized so
 * start_display_time is 0 before encoding.
 * NOTE(review): several original lines (params, braces, pts setup) are
 * elided; code is kept verbatim.
 */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;   /* fixed 1 MiB encode buffer */
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

        /* Lazily allocate the shared output buffer. */
        subtitle_out = av_malloc(subtitle_out_max_size);
        av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* Restore num_rects (second DVB pass encodes 0 rects — clear). */
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
                /* 90 = 90 kHz MPEG clock ticks per millisecond. */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;

        write_frame(s, &pkt, ost);
/*
 * Video frame-rate conversion and encoding. Decides, per input picture,
 * how many frames to emit (duplicating or dropping according to the
 * -vsync mode), handles forced keyframes (-force_key_frames list,
 * expression, or "source"), encodes via avcodec_encode_video2() (or the
 * deprecated rawpicture path), and keeps the previous frame around for
 * duplication.
 * NOTE(review): this excerpt elides many original lines (params, braces,
 * case labels, returns); code is kept verbatim.
 */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Derive the nominal frame duration (in encoder ticks) from the filter
       output frame rate, clamped by the forced output rate if known. */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* With a plain passthrough graph, prefer the input packet duration. */
    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

    /* Flush path (no next picture): predict dup count from recent history. */
    nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                      ost->last_nb0_frames[1],
                                      ost->last_nb0_frames[2]);

    delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
    delta  = delta0 + duration;

    /* by default, we output a single frame */

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;

        /* Negative drift: shorten (clip) the frame instead of dropping it,
           except in passthrough/drop modes. */
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
        double cor = FFMIN(-delta0, duration);
        if (delta0 < -0.6) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);

    switch (format_video_sync) {
        /* VSCFR: start clean at the input timestamp, skip initial dups. */
        if (ost->frame_number == 0 && delta - duration >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
        ost->sync_opts = lrint(sync_ipts);
        /* CFR: duplicate or drop to keep a constant rate. */
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
            nb0_frames = lrintf(delta0 - 0.6);
        /* VFR: emit at the input timestamp, drop only on large overlap. */
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);

    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    /* Clamp against -frames and record dup history. */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_droped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_droped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* First nb0_frames iterations re-encode the previous frame. */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

#if FF_API_LAVF_FMT_RAWPICTURE
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;   /* let the encoder choose */

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* Forced keyframes: explicit pts list takes priority, then the
               -force_key_frames expression, then "source" (copy from input). */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);

            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                /* Encoders without CAP_DELAY may omit pts; use sync_opts. */
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);

    /* Keep a reference to this frame for future duplication. */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
1245 static double psnr(double d)
1247 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * sizes, bitrates, picture type) to the -vstats file, opening it lazily.
 * NOTE(review): this excerpt elides some original lines (open-failure
 * handling, enc assignment, braces).
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            /* error[0] is a summed squared error; normalize per pixel/255^2. */
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark ost fully finished (encoder and muxer) and — under a condition
 * elided in this excerpt, presumably -shortest — mark every other stream
 * of the same output file finished as well.
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1302 * Get and encode new output from any of the filtergraphs, without causing
1305 * @return 0 for success, <0 for severe errors
/*
 * Non-blocking drain of every output stream's buffersink: frames are pulled
 * with AV_BUFFERSINK_FLAG_NO_REQUEST, their PTS is rescaled from the filter
 * time base into the encoder time base, and they are handed to
 * do_video_out() / do_audio_out().  With 'flush' set, an EOF from a video
 * sink triggers do_video_out(..., NULL, ...) to drain delayed frames.
 */
1307 static int reap_filters(int flush)
1309 AVFrame *filtered_frame = NULL;
1312 /* Reap all buffers present in the buffer sinks */
1313 for (i = 0; i < nb_output_streams; i++) {
1314 OutputStream *ost = output_streams[i];
1315 OutputFile *of = output_files[ost->file_index];
1316 AVFilterContext *filter;
1317 AVCodecContext *enc = ost->enc_ctx;
1322 filter = ost->filter->filter;
/* lazily allocate the reusable frame that receives filter output */
1324 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1325 return AVERROR(ENOMEM);
1327 filtered_frame = ost->filtered_frame;
1330 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1331 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1332 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN (nothing buffered) and EOF are expected; anything else is reported */
1334 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1335 av_log(NULL, AV_LOG_WARNING,
1336 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1337 } else if (flush && ret == AVERROR_EOF) {
/* pass a NULL frame so the video output path can flush its state */
1338 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1339 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1343 if (ost->finished) {
1344 av_frame_unref(filtered_frame);
1347 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1348 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1349 AVRational tb = enc->time_base;
/* temporarily raise the time base resolution so float_pts keeps
 * fractional precision after the rescale; undone by the divide below */
1350 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1352 tb.den <<= extra_bits;
1354 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1355 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1356 float_pts /= 1 << extra_bits;
1357 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1358 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1360 filtered_frame->pts =
1361 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1362 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1364 //if (ost->source_index >= 0)
1365 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1367 switch (filter->inputs[0]->type) {
1368 case AVMEDIA_TYPE_VIDEO:
/* unless the user forced an aspect ratio, propagate the filter's SAR */
1369 if (!ost->frame_aspect_ratio.num)
1370 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1373 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1374 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1376 enc->time_base.num, enc->time_base.den);
1379 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1381 case AVMEDIA_TYPE_AUDIO:
/* a channel-count change can only be encoded if the codec supports
 * mid-stream parameter changes */
1382 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1383 enc->channels != av_frame_get_channels(filtered_frame)) {
1384 av_log(NULL, AV_LOG_ERROR,
1385 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1388 do_audio_out(of->ctx, ost, filtered_frame);
1391 // TODO support subtitle filters
1395 av_frame_unref(filtered_frame);
/**
 * Log the end-of-run summary: total bytes per media type, muxing overhead
 * percentage, and (at verbose level) per-stream packet/frame counts for
 * every input and output file.  Warns when nothing was encoded at all.
 *
 * Fix: use AV_CODEC_FLAG_PASS2 instead of the deprecated CODEC_FLAG_PASS2
 * so the pass-flag mask consistently uses the AV_CODEC_FLAG_* namespace
 * already used on the adjacent AV_CODEC_FLAG_PASS1 test.
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is visible.
 */
1402 static void print_final_stats(int64_t total_size)
1404 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1405 uint64_t subtitle_size = 0;
1406 uint64_t data_size = 0;
/* -1 means "overhead unknown" and prints as "unknown" below */
1407 float percent = -1.0;
1411 for (i = 0; i < nb_output_streams; i++) {
1412 OutputStream *ost = output_streams[i];
1413 switch (ost->enc_ctx->codec_type) {
1414 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1415 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1416 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1417 default: other_size += ost->data_size; break;
1419 extra_size += ost->enc_ctx->extradata_size;
1420 data_size += ost->data_size;
/* streams doing only the first pass of a two-pass encode are special-cased */
1421 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1422 != AV_CODEC_FLAG_PASS1)
1426 if (data_size && total_size>0 && total_size >= data_size)
1427 percent = 100.0 * (total_size - data_size) / data_size;
1429 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1430 video_size / 1024.0,
1431 audio_size / 1024.0,
1432 subtitle_size / 1024.0,
1433 other_size / 1024.0,
1434 extra_size / 1024.0);
1436 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1438 av_log(NULL, AV_LOG_INFO, "unknown");
1439 av_log(NULL, AV_LOG_INFO, "\n");
1441 /* print verbose per-stream stats */
1442 for (i = 0; i < nb_input_files; i++) {
1443 InputFile *f = input_files[i];
1444 uint64_t total_packets = 0, total_size = 0;
1446 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1447 i, f->ctx->filename);
1449 for (j = 0; j < f->nb_streams; j++) {
1450 InputStream *ist = input_streams[f->ist_index + j];
1451 enum AVMediaType type = ist->dec_ctx->codec_type;
1453 total_size += ist->data_size;
1454 total_packets += ist->nb_packets;
1456 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1457 i, j, media_type_string(type));
1458 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1459 ist->nb_packets, ist->data_size);
1461 if (ist->decoding_needed) {
1462 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1463 ist->frames_decoded);
1464 if (type == AVMEDIA_TYPE_AUDIO)
1465 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1466 av_log(NULL, AV_LOG_VERBOSE, "; ");
1469 av_log(NULL, AV_LOG_VERBOSE, "\n");
1472 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1473 total_packets, total_size);
1476 for (i = 0; i < nb_output_files; i++) {
1477 OutputFile *of = output_files[i];
1478 uint64_t total_packets = 0, total_size = 0;
1480 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1481 i, of->ctx->filename);
1483 for (j = 0; j < of->ctx->nb_streams; j++) {
1484 OutputStream *ost = output_streams[of->ost_index + j];
1485 enum AVMediaType type = ost->enc_ctx->codec_type;
1487 total_size += ost->data_size;
1488 total_packets += ost->packets_written;
1490 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1491 i, j, media_type_string(type));
1492 if (ost->encoding_needed) {
1493 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1494 ost->frames_encoded);
1495 if (type == AVMEDIA_TYPE_AUDIO)
1496 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1497 av_log(NULL, AV_LOG_VERBOSE, "; ");
1500 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1501 ost->packets_written, ost->data_size);
1503 av_log(NULL, AV_LOG_VERBOSE, "\n");
1506 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1507 total_packets, total_size);
1509 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1510 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1512 av_log(NULL, AV_LOG_WARNING, "\n");
1514 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/**
 * Periodically (and once more at the end) print the interactive status line
 * ("frame= ... fps= ... q= ... size= ... time= ... bitrate= ...") and, when
 * -progress is in use, write machine-readable key=value pairs to
 * progress_avio.  On the last report it also calls print_final_stats().
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is visible.
 */
1519 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1522 AVBPrint buf_script;
1524 AVFormatContext *oc;
1526 AVCodecContext *enc;
1527 int frame_number, vid, i;
1529 int64_t pts = INT64_MIN + 1;
1530 static int64_t last_time = -1;
1531 static int qp_histogram[52];
1532 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress output is wanted */
1534 if (!print_stats && !is_last_report && !progress_avio)
1537 if (!is_last_report) {
1538 if (last_time == -1) {
1539 last_time = cur_time;
/* rate-limit intermediate reports to one per 500 ms */
1542 if ((cur_time - last_time) < 500000)
1544 last_time = cur_time;
1548 oc = output_files[0]->ctx;
1550 total_size = avio_size(oc->pb);
1551 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1552 total_size = avio_tell(oc->pb);
1556 av_bprint_init(&buf_script, 0, 1);
1557 for (i = 0; i < nb_output_streams; i++) {
1559 ost = output_streams[i];
1561 if (!ost->stream_copy)
1562 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream has already been reported */
1564 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1565 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1566 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1567 ost->file_index, ost->index, q);
1569 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1570 float fps, t = (cur_time-timer_start) / 1000000.0;
1572 frame_number = ost->frame_number;
1573 fps = t > 1 ? frame_number / t : 0;
1574 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1575 frame_number, fps < 9.95, fps, q);
1576 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1577 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1578 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1579 ost->file_index, ost->index, q);
1581 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate a quantizer histogram; printed as one hex digit per bucket */
1585 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1587 for (j = 0; j < 32; j++)
1588 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1591 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1593 double error, error_sum = 0;
1594 double scale, scale_sum = 0;
1596 char type[3] = { 'Y','U','V' };
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
/* per-plane PSNR: global error on the final report, per-frame otherwise */
1598 for (j = 0; j < 3; j++) {
1599 if (is_last_report) {
1600 error = enc->error[j];
1601 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1603 error = ost->error[j];
1604 scale = enc->width * enc->height * 255.0 * 255.0;
1610 p = psnr(error / scale);
1611 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1612 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1613 ost->file_index, ost->index, type[j] | 32, p);
1615 p = psnr(error_sum / scale_sum);
1616 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1617 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1618 ost->file_index, ost->index, p);
1622 /* compute min output value */
1623 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1624 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1625 ost->st->time_base, AV_TIME_BASE_Q));
1627 nb_frames_drop += ost->last_droped;
1630 secs = FFABS(pts) / AV_TIME_BASE;
1631 us = FFABS(pts) % AV_TIME_BASE;
1637 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1639 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1641 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1642 "size=%8.0fkB time=", total_size / 1024.0);
1644 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1645 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1646 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1647 (100 * us) / AV_TIME_BASE);
1650 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1651 av_bprintf(&buf_script, "bitrate=N/A\n");
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1654 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1657 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1658 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1659 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1660 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1661 hours, mins, secs, us);
1663 if (nb_frames_dup || nb_frames_drop)
1664 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1665 nb_frames_dup, nb_frames_drop);
1666 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1667 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1669 if (print_stats || is_last_report) {
/* '\r' keeps intermediate reports on one console line; '\n' ends the last */
1670 const char end = is_last_report ? '\n' : '\r';
1671 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1672 fprintf(stderr, "%s %c", buf, end);
1674 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1679 if (progress_avio) {
1680 av_bprintf(&buf_script, "progress=%s\n",
1681 is_last_report ? "end" : "continue");
1682 avio_write(progress_avio, buf_script.str,
1683 FFMIN(buf_script.len, buf_script.size - 1));
1684 avio_flush(progress_avio);
1685 av_bprint_finalize(&buf_script, NULL);
1686 if (is_last_report) {
1687 avio_closep(&progress_avio);
1692 print_final_stats(total_size);
/**
 * At end of input, drain every active encoder by repeatedly calling it with
 * a NULL frame until it stops producing packets, writing each drained
 * packet through write_frame().  Streams that are stream-copied, raw-audio
 * encoders (frame_size <= 1) and RAWPICTURE video are skipped.
 *
 * NOTE(review): this chunk appears to have lines elided (the drain loop's
 * braces and several error paths are not visible); comments describe only
 * what is shown.
 */
1695 static void flush_encoders(void)
1699 for (i = 0; i < nb_output_streams; i++) {
1700 OutputStream *ost = output_streams[i];
1701 AVCodecContext *enc = ost->enc_ctx;
1702 AVFormatContext *os = output_files[ost->file_index]->ctx;
1703 int stop_encoding = 0;
1705 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders have nothing buffered to flush */
1708 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1710 #if FF_API_LAVF_FMT_RAWPICTURE
1711 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* select the media-appropriate encode entry point */
1716 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1719 switch (enc->codec_type) {
1720 case AVMEDIA_TYPE_AUDIO:
1721 encode = avcodec_encode_audio2;
1724 case AVMEDIA_TYPE_VIDEO:
1725 encode = avcodec_encode_video2;
1736 av_init_packet(&pkt);
1740 update_benchmark(NULL);
/* NULL frame = drain request: the encoder returns its delayed packets */
1741 ret = encode(enc, &pkt, NULL, &got_packet);
1742 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1744 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass log data is still emitted during the flush */
1749 if (ost->logfile && enc->stats_out) {
1750 fprintf(ost->logfile, "%s", enc->stats_out);
1756 if (ost->finished & MUXER_FINISHED) {
1757 av_free_packet(&pkt);
1760 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1761 pkt_size = pkt.size;
1762 write_frame(os, &pkt, ost);
1763 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1764 do_video_stats(ost, pkt_size);
1775 * Check whether a packet from ist should be written into ost at this time
/*
 * Presumably returns nonzero when the packet should be written and 0
 * otherwise (the return statements are elided from this view — TODO
 * confirm).  Visible conditions: the output stream must be fed from this
 * exact input stream, and packets before the output file's start time are
 * excluded.
 */
1777 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1779 OutputFile *of = output_files[ost->file_index];
1780 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* ost must be mapped to this input stream */
1782 if (ost->source_index != ist_index)
1788 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/**
 * Copy one input packet straight to an output stream (-c copy path): apply
 * start-time / recording-time windowing, rescale pts/dts/duration into the
 * output stream time base, optionally run av_parser_change() for legacy
 * bitstream fixups, then hand the packet to write_frame().
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is shown.
 */
1794 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1796 OutputFile *of = output_files[ost->file_index];
1797 InputFile *f = input_files [ist->file_index];
1798 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1799 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1800 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1804 av_init_packet(&opkt);
/* by default the copied stream must start on a keyframe */
1806 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1807 !ost->copy_initial_nonkeyframes)
/* drop packets that precede the requested output start time, unless
 * -copypriorss asked to keep them */
1810 if (pkt->pts == AV_NOPTS_VALUE) {
1811 if (!ost->frame_number && ist->pts < start_time &&
1812 !ost->copy_prior_start)
1815 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1816 !ost->copy_prior_start)
/* stop the stream once the output file's recording window is exceeded */
1820 if (of->recording_time != INT64_MAX &&
1821 ist->pts >= of->recording_time + start_time) {
1822 close_output_stream(ost);
1826 if (f->recording_time != INT64_MAX) {
1827 start_time = f->ctx->start_time;
1828 if (f->start_time != AV_NOPTS_VALUE)
1829 start_time += f->start_time;
1830 if (ist->pts >= f->recording_time + start_time) {
1831 close_output_stream(ost);
1836 /* force the input stream PTS */
1837 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale timestamps into the output stream time base, offset by the
 * output start time */
1840 if (pkt->pts != AV_NOPTS_VALUE)
1841 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1843 opkt.pts = AV_NOPTS_VALUE;
1845 if (pkt->dts == AV_NOPTS_VALUE)
1846 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1848 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1849 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration */
1851 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1852 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1854 duration = ist->dec_ctx->frame_size;
1855 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1856 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1857 ost->st->time_base) - ost_tb_start_time;
1860 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1861 opkt.flags = pkt->flags;
1862 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1863 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1864 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1865 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1866 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1868 int ret = av_parser_change(ost->parser, ost->st->codec,
1869 &opkt.data, &opkt.size,
1870 pkt->data, pkt->size,
1871 pkt->flags & AV_PKT_FLAG_KEY);
1873 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-produced buffer so the packet owns (and frees) it */
1878 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1883 opkt.data = pkt->data;
1884 opkt.size = pkt->size;
1886 av_copy_packet_side_data(&opkt, pkt);
1888 #if FF_API_LAVF_FMT_RAWPICTURE
1889 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1890 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1891 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1892 /* store AVPicture in AVPacket, as expected by the output format */
1893 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1895 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1899 opkt.data = (uint8_t *)&pict;
1900 opkt.size = sizeof(AVPicture);
1901 opkt.flags |= AV_PKT_FLAG_KEY;
1905 write_frame(of->ctx, &opkt, ost);
/**
 * If the decoder context of 'ist' has no channel layout, pick the default
 * layout for its channel count (when the count does not exceed
 * guess_layout_max) and warn the user about the guess.
 * Presumably returns nonzero on success and 0 when no layout could be
 * determined (return statements are elided from this view — TODO confirm).
 */
1908 int guess_input_channel_layout(InputStream *ist)
1910 AVCodecContext *dec = ist->dec_ctx;
1912 if (!dec->channel_layout) {
1913 char layout_name[256];
/* refuse to guess for very high channel counts (user-configurable cap) */
1915 if (dec->channels > ist->guess_layout_max)
1917 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1918 if (!dec->channel_layout)
1920 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1921 dec->channels, dec->channel_layout);
1922 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1923 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/**
 * Decode one audio packet, update the stream's timestamp bookkeeping,
 * reconfigure the filtergraphs if the sample format/rate/channels changed
 * mid-stream, and push the decoded frame into every attached filter input.
 *
 * @return <0 on error; otherwise the decoder's return value.
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is shown.
 */
1928 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1930 AVFrame *decoded_frame, *f;
1931 AVCodecContext *avctx = ist->dec_ctx;
1932 int i, ret, err = 0, resample_changed;
1933 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1935 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1936 return AVERROR(ENOMEM);
1937 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1938 return AVERROR(ENOMEM);
1939 decoded_frame = ist->decoded_frame;
1941 update_benchmark(NULL);
1942 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1943 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1945 if (ret >= 0 && avctx->sample_rate <= 0) {
1946 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1947 ret = AVERROR_INVALIDDATA;
/* decode_error_stat[0] counts successes, [1] counts failures */
1950 if (*got_output || ret<0)
1951 decode_error_stat[ret<0] ++;
1953 if (ret < 0 && exit_on_error)
1956 if (!*got_output || ret < 0)
1959 ist->samples_decoded += decoded_frame->nb_samples;
1960 ist->frames_decoded++;
1963 /* increment next_dts to use for the case where the input stream does not
1964 have timestamps or there are multiple frames in the packet */
1965 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1967 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream changes of format/rate/channels/layout */
1971 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1972 ist->resample_channels != avctx->channels ||
1973 ist->resample_channel_layout != decoded_frame->channel_layout ||
1974 ist->resample_sample_rate != decoded_frame->sample_rate;
1975 if (resample_changed) {
1976 char layout1[64], layout2[64];
1978 if (!guess_input_channel_layout(ist)) {
1979 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1980 "layout for Input Stream #%d.%d\n", ist->file_index,
1984 decoded_frame->channel_layout = avctx->channel_layout;
1986 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1987 ist->resample_channel_layout);
1988 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1989 decoded_frame->channel_layout);
1991 av_log(NULL, AV_LOG_INFO,
1992 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1993 ist->file_index, ist->st->index,
1994 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1995 ist->resample_channels, layout1,
1996 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1997 avctx->channels, layout2);
1999 ist->resample_sample_fmt = decoded_frame->format;
2000 ist->resample_sample_rate = decoded_frame->sample_rate;
2001 ist->resample_channel_layout = decoded_frame->channel_layout;
2002 ist->resample_channels = avctx->channels;
/* the new audio parameters require rebuilding the affected filtergraphs */
2004 for (i = 0; i < nb_filtergraphs; i++)
2005 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2006 FilterGraph *fg = filtergraphs[i];
2007 if (configure_filtergraph(fg) < 0) {
2008 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2014 /* if the decoder provides a pts, use it instead of the last packet pts.
2015 the decoder could be delaying output by a packet or more. */
2016 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2017 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2018 decoded_frame_tb = avctx->time_base;
2019 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2020 decoded_frame->pts = decoded_frame->pkt_pts;
2021 decoded_frame_tb = ist->st->time_base;
2022 } else if (pkt->pts != AV_NOPTS_VALUE) {
2023 decoded_frame->pts = pkt->pts;
2024 decoded_frame_tb = ist->st->time_base;
2026 decoded_frame->pts = ist->dts;
2027 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so a second frame from this packet won't reuse it */
2029 pkt->pts = AV_NOPTS_VALUE;
2030 if (decoded_frame->pts != AV_NOPTS_VALUE)
2031 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2032 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2033 (AVRational){1, avctx->sample_rate});
2034 ist->nb_samples = decoded_frame->nb_samples;
/* fan the frame out to every filter input; all but the last get a ref */
2035 for (i = 0; i < ist->nb_filters; i++) {
2036 if (i < ist->nb_filters - 1) {
2037 f = ist->filter_frame;
2038 err = av_frame_ref(f, decoded_frame);
2043 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2044 AV_BUFFERSRC_FLAG_PUSH);
2045 if (err == AVERROR_EOF)
2046 err = 0; /* ignore */
2050 decoded_frame->pts = AV_NOPTS_VALUE;
2052 av_frame_unref(ist->filter_frame);
2053 av_frame_unref(decoded_frame);
2054 return err < 0 ? err : ret;
/**
 * Decode one video packet, derive a best-effort timestamp for the frame,
 * reconfigure filtergraphs on mid-stream size/pixel-format changes, and
 * push the decoded frame into every attached filter input.
 *
 * @return <0 on error; otherwise the decoder's return value.
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is shown.
 */
2057 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2059 AVFrame *decoded_frame, *f;
2060 int i, ret = 0, err = 0, resample_changed;
2061 int64_t best_effort_timestamp;
2062 AVRational *frame_sample_aspect;
2064 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2065 return AVERROR(ENOMEM);
2066 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2067 return AVERROR(ENOMEM);
2068 decoded_frame = ist->decoded_frame;
/* feed the decoder our tracked dts, rescaled into the stream time base */
2069 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2071 update_benchmark(NULL);
2072 ret = avcodec_decode_video2(ist->dec_ctx,
2073 decoded_frame, got_output, pkt);
2074 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2076 // The following line may be required in some cases where there is no parser
2077 // or the parser does not has_b_frames correctly
2078 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2079 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2080 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2082 av_log(ist->dec_ctx, AV_LOG_WARNING,
2083 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2084 "If you want to help, upload a sample "
2085 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2086 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2087 ist->dec_ctx->has_b_frames,
2088 ist->st->codec->has_b_frames);
/* decode_error_stat[0] counts successes, [1] counts failures */
2091 if (*got_output || ret<0)
2092 decode_error_stat[ret<0] ++;
2094 if (ret < 0 && exit_on_error)
2097 if (*got_output && ret >= 0) {
2098 if (ist->dec_ctx->width != decoded_frame->width ||
2099 ist->dec_ctx->height != decoded_frame->height ||
2100 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2101 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2102 decoded_frame->width,
2103 decoded_frame->height,
2104 decoded_frame->format,
2105 ist->dec_ctx->width,
2106 ist->dec_ctx->height,
2107 ist->dec_ctx->pix_fmt);
2111 if (!*got_output || ret < 0)
/* honor the user's -top override for interlaced material */
2114 if(ist->top_field_first>=0)
2115 decoded_frame->top_field_first = ist->top_field_first;
2117 ist->frames_decoded++;
/* download the frame from hardware memory when a hwaccel was used */
2119 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2120 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2124 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2126 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2127 if(best_effort_timestamp != AV_NOPTS_VALUE)
2128 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2131 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2132 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2133 ist->st->index, av_ts2str(decoded_frame->pts),
2134 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2135 best_effort_timestamp,
2136 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2137 decoded_frame->key_frame, decoded_frame->pict_type,
2138 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides whatever the decoder produced */
2143 if (ist->st->sample_aspect_ratio.num)
2144 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2146 resample_changed = ist->resample_width != decoded_frame->width ||
2147 ist->resample_height != decoded_frame->height ||
2148 ist->resample_pix_fmt != decoded_frame->format;
2149 if (resample_changed) {
2150 av_log(NULL, AV_LOG_INFO,
2151 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2152 ist->file_index, ist->st->index,
2153 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2154 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2156 ist->resample_width = decoded_frame->width;
2157 ist->resample_height = decoded_frame->height;
2158 ist->resample_pix_fmt = decoded_frame->format;
/* new frame geometry/format requires rebuilding affected filtergraphs
 * (only when -reinit_filter is enabled for this stream) */
2160 for (i = 0; i < nb_filtergraphs; i++) {
2161 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2162 configure_filtergraph(filtergraphs[i]) < 0) {
2163 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2169 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* fan the frame out to every filter input; all but the last get a ref */
2170 for (i = 0; i < ist->nb_filters; i++) {
2171 if (!frame_sample_aspect->num)
2172 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2174 if (i < ist->nb_filters - 1) {
2175 f = ist->filter_frame;
2176 err = av_frame_ref(f, decoded_frame);
2181 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2182 if (ret == AVERROR_EOF) {
2183 ret = 0; /* ignore */
2184 } else if (ret < 0) {
2185 av_log(NULL, AV_LOG_FATAL,
2186 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2192 av_frame_unref(ist->filter_frame);
2193 av_frame_unref(decoded_frame);
2194 return err < 0 ? err : ret;
/**
 * Decode one subtitle packet and send the result to every subtitle output
 * stream fed by this input, plus the sub2video path.  With
 * -fix_sub_duration, output of the previous subtitle is delayed by one
 * packet so its display time can be clipped against the next one.
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is shown.
 */
2197 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2199 AVSubtitle subtitle;
2200 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2201 &subtitle, got_output, pkt);
/* decode_error_stat[0] counts successes, [1] counts failures */
2203 if (*got_output || ret<0)
2204 decode_error_stat[ret<0] ++;
2206 if (ret < 0 && exit_on_error)
2209 if (ret < 0 || !*got_output) {
2211 sub2video_flush(ist);
2215 if (ist->fix_sub_duration) {
2217 if (ist->prev_sub.got_output) {
/* clip the previous subtitle so it ends (at the latest) when this one
 * starts; end_display_time is in milliseconds */
2218 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2219 1000, AV_TIME_BASE);
2220 if (end < ist->prev_sub.subtitle.end_display_time) {
2221 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2222 "Subtitle duration reduced from %d to %d%s\n",
2223 ist->prev_sub.subtitle.end_display_time, end,
2224 end <= 0 ? ", dropping it" : "");
2225 ist->prev_sub.subtitle.end_display_time = end;
/* emit the buffered previous subtitle and buffer the current one */
2228 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2229 FFSWAP(int, ret, ist->prev_sub.ret);
2230 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2238 sub2video_update(ist, &subtitle);
2240 if (!subtitle.num_rects)
2243 ist->frames_decoded++;
2245 for (i = 0; i < nb_output_streams; i++) {
2246 OutputStream *ost = output_streams[i];
2248 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2249 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2252 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2256 avsubtitle_free(&subtitle);
/**
 * Signal EOF to every filter input attached to 'ist' by pushing a NULL
 * frame into each buffersrc.  Presumably returns 0 on success or the
 * first negative error from av_buffersrc_add_frame() (the return
 * statements are elided from this view — TODO confirm).
 */
2260 static int send_filter_eof(InputStream *ist)
2263 for (i = 0; i < ist->nb_filters; i++) {
/* a NULL frame is the buffersrc EOF marker */
2264 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2271 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher: maintains the input stream's dts/pts
 * bookkeeping, runs the appropriate decoder (audio/video/subtitle) when
 * decoding is needed — looping while the packet still has data or, on EOF,
 * while the decoder keeps producing output — and otherwise advances
 * timestamps and forwards the packet to all matching stream-copy outputs.
 *
 * NOTE(review): this chunk appears to have lines elided; comments describe
 * only what is shown.
 */
2272 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2278 if (!ist->saw_first_ts) {
/* start with a negative dts to compensate for decoder B-frame delay */
2279 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2281 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2282 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2283 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2285 ist->saw_first_ts = 1;
2288 if (ist->next_dts == AV_NOPTS_VALUE)
2289 ist->next_dts = ist->dts;
2290 if (ist->next_pts == AV_NOPTS_VALUE)
2291 ist->next_pts = ist->pts;
2295 av_init_packet(&avpkt);
2303 if (pkt->dts != AV_NOPTS_VALUE) {
2304 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2305 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2306 ist->next_pts = ist->pts = ist->dts;
2309 // while we have more to decode or while the decoder did output something on EOF
2310 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2314 ist->pts = ist->next_pts;
2315 ist->dts = ist->next_dts;
/* partial consumption of a packet implies multiple frames per packet */
2317 if (avpkt.size && avpkt.size != pkt->size &&
2318 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2319 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2320 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2321 ist->showed_multi_packet_warning = 1;
2324 switch (ist->dec_ctx->codec_type) {
2325 case AVMEDIA_TYPE_AUDIO:
2326 ret = decode_audio (ist, &avpkt, &got_output);
2328 case AVMEDIA_TYPE_VIDEO:
2329 ret = decode_video (ist, &avpkt, &got_output);
/* estimate the frame duration: packet duration first, then codec
 * framerate (accounting for repeated fields via the parser) */
2330 if (avpkt.duration) {
2331 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2332 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2333 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2334 duration = ((int64_t)AV_TIME_BASE *
2335 ist->dec_ctx->framerate.den * ticks) /
2336 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2340 if(ist->dts != AV_NOPTS_VALUE && duration) {
2341 ist->next_dts += duration;
2343 ist->next_dts = AV_NOPTS_VALUE;
2346 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2348 case AVMEDIA_TYPE_SUBTITLE:
2349 ret = transcode_subtitles(ist, &avpkt, &got_output);
2356 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2357 ist->file_index, ist->st->index, av_err2str(ret));
/* the pts only applies to the first frame decoded from this packet */
2364 avpkt.pts= AV_NOPTS_VALUE;
2366 // touch data and size only if not EOF
2368 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2376 if (got_output && !pkt)
2380 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2381 /* except when looping we need to flush but not to send an EOF */
2382 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2383 int ret = send_filter_eof(ist);
2385 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2390 /* handle stream copy */
2391 if (!ist->decoding_needed) {
2392 ist->dts = ist->next_dts;
2393 switch (ist->dec_ctx->codec_type) {
2394 case AVMEDIA_TYPE_AUDIO:
2395 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2396 ist->dec_ctx->sample_rate;
2398 case AVMEDIA_TYPE_VIDEO:
2399 if (ist->framerate.num) {
2400 // TODO: Remove work-around for c99-to-c89 issue 7
2401 AVRational time_base_q = AV_TIME_BASE_Q;
2402 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2403 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2404 } else if (pkt->duration) {
2405 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2406 } else if(ist->dec_ctx->framerate.num != 0) {
2407 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2408 ist->next_dts += ((int64_t)AV_TIME_BASE *
2409 ist->dec_ctx->framerate.den * ticks) /
2410 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2414 ist->pts = ist->dts;
2415 ist->next_pts = ist->next_dts;
/* forward the packet to every output stream that copies this input */
2417 for (i = 0; pkt && i < nb_output_streams; i++) {
2418 OutputStream *ost = output_streams[i];
2420 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2423 do_streamcopy(ist, ost, pkt);
2429 static void print_sdp(void)
/* Collect the AVFormatContexts of all "rtp" output files and emit a
 * combined SDP description, either to stdout (default) or to the file
 * named by the global sdp_filename.  NOTE(review): the allocation check
 * for avc and the loop/brace structure fall on lines elided from this
 * view — confirm against the full file. */
2434     AVIOContext *sdp_pb;
2435     AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* j counts only the RTP muxers; other output files do not contribute */
2439     for (i = 0, j = 0; i < nb_output_files; i++) {
2440         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2441             avc[j] = output_files[i]->ctx;
2446     av_sdp_create(avc, j, sdp, sizeof(sdp));
2448     if (!sdp_filename) {
2449         printf("SDP:\n%s\n", sdp);
/* write to the user-requested SDP file via the avio layer so the usual
 * interrupt callback (int_cb) applies */
2452         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2453             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2455             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2456             avio_closep(&sdp_pb);
/* sdp_filename is consumed here so the SDP is only written once */
2457             av_freep(&sdp_filename);
2464 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2467 for (i = 0; hwaccels[i].name; i++)
2468 if (hwaccels[i].pix_fmt == pix_fmt)
2469 return &hwaccels[i];
2473 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and accept the first one that is either a plain software format
 * or a hwaccel format whose hwaccel can actually be initialized.
 * On success for a hwaccel format, records the active hwaccel id and
 * pixel format on the InputStream. */
2475     InputStream *ist = s->opaque;
2476     const enum AVPixelFormat *p;
2479     for (p = pix_fmts; *p != -1; p++) {
2480         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2481         const HWAccel *hwaccel;
/* non-hwaccel formats need no vetting — take the first one offered */
2483         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2486         hwaccel = get_hwaccel(*p);
/* reject this format if: no hwaccel supports it, a different hwaccel is
 * already active on this stream, or the user explicitly requested some
 * other hwaccel (anything but HWACCEL_AUTO) */
2488             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2489             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2492         ret = hwaccel->init(s);
/* init failure is fatal only when this exact hwaccel was requested;
 * under HWACCEL_AUTO we silently fall through to the next candidate */
2494             if (ist->hwaccel_id == hwaccel->id) {
2495                 av_log(NULL, AV_LOG_FATAL,
2496                        "%s hwaccel requested for input stream #%d:%d, "
2497                        "but cannot be initialized.\n", hwaccel->name,
2498                        ist->file_index, ist->st->index);
2499                 return AV_PIX_FMT_NONE;
2503         ist->active_hwaccel_id = hwaccel->id;
2504         ist->hwaccel_pix_fmt   = *p;
2511 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2513 InputStream *ist = s->opaque;
2515 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2516 return ist->hwaccel_get_buffer(s, frame, flags);
2518 return avcodec_default_get_buffer2(s, frame, flags);
2521 static int init_input_stream(int ist_index, char *error, int error_len)
/* Open the decoder for one input stream (when decoding is needed) and
 * reset its timestamp bookkeeping.  On failure, writes a human-readable
 * message into error[] and returns a negative AVERROR code. */
2524     InputStream *ist = input_streams[ist_index];
2526     if (ist->decoding_needed) {
2527         AVCodec *codec = ist->dec;
/* no decoder was found for this codec id — report and bail out */
2529             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2530                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2531             return AVERROR(EINVAL);
/* install our callbacks so hwaccel negotiation and buffer allocation
 * go through get_format()/get_buffer() above */
2534         ist->dec_ctx->opaque                = ist;
2535         ist->dec_ctx->get_format            = get_format;
2536         ist->dec_ctx->get_buffer2           = get_buffer;
2537         ist->dec_ctx->thread_safe_callbacks = 1;
2539         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output: enable end-display-time computation
 * unless the user already set it */
2540         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2541            (ist->decoding_needed & DECODING_FOR_OST)) {
2542             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2543             if (ist->decoding_needed & DECODING_FOR_FILTER)
2544                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* default to automatic thread count unless the user chose one */
2547         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2548             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2549         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2550             if (ret == AVERROR_EXPERIMENTAL)
2551                 abort_codec_experimental(codec, 0);
2553             snprintf(error, error_len,
2554                      "Error while opening decoder for input stream "
2556                     ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder option left unconsumed by avcodec_open2 is an error */
2559         assert_avoptions(ist->decoder_opts);
/* timestamps are unknown until the first packet is seen */
2562     ist->next_pts = AV_NOPTS_VALUE;
2563     ist->next_dts = AV_NOPTS_VALUE;
2568 static InputStream *get_input_stream(OutputStream *ost)
2570 if (ost->source_index >= 0)
2571 return input_streams[ost->source_index];
/* qsort()-compatible comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;

    if (x < y)
        return -1;
    return x > y ? 1 : 0;
}
2581 static int init_output_stream(OutputStream *ost, char *error, int error_len)
/* Open the encoder for one output stream (when encoding is needed),
 * propagate subtitle headers from the decoder, and sync the muxer-side
 * stream context.  On failure, writes a message into error[] and returns
 * a negative AVERROR code. */
2585     if (ost->encoding_needed) {
2586         AVCodec      *codec = ost->enc;
2587         AVCodecContext *dec = NULL;
2590         if ((ist = get_input_stream(ost)))
/* ASS subtitle rendering needs the decoder's header available to the
 * encoder; copy it over with an extra NUL byte */
2592         if (dec && dec->subtitle_header) {
2593             /* ASS code assumes this buffer is null terminated so add extra byte. */
2594             ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2595             if (!ost->enc_ctx->subtitle_header)
2596                 return AVERROR(ENOMEM);
2597             memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2598             ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2600         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2601             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* give audio encoders a 128 kb/s default bitrate when the user set none */
2602         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2604             !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2605             !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2606             av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2608         if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2609             if (ret == AVERROR_EXPERIMENTAL)
2610                 abort_codec_experimental(codec, 1);
2611             snprintf(error, error_len,
2612                      "Error while opening encoder for output stream #%d:%d - "
2613                      "maybe incorrect parameters such as bit_rate, rate, width or height",
2614                     ost->file_index, ost->index);
/* fixed-frame-size audio encoders need the buffersink to deliver frames
 * of exactly frame_size samples */
2617         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2618             !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2619             av_buffersink_set_frame_size(ost->filter->filter,
2620                                          ost->enc_ctx->frame_size);
2621         assert_avoptions(ost->encoder_opts);
2622         if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2623             av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2624                                          " It takes bits/s as argument, not kbits/s\n");
/* mirror the (now fully configured) encoder context into the muxer-side
 * AVStream codec context */
2626         ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2628             av_log(NULL, AV_LOG_FATAL,
2629                    "Error initializing the output stream codec context.\n");
2633         // copy timebase while removing common factors
2634         ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2635         ost->st->codec->codec= ost->enc_ctx->codec;
/* stream-copy path: apply user options directly to the context.
 * NOTE(review): time_base is normalized in both branches (2634/2644)
 * via av_add_q with 0/1, which only reduces the fraction. */
2637         ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2639             av_log(NULL, AV_LOG_FATAL,
2640                    "Error setting up codec context options.\n");
2643         // copy timebase while removing common factors
2644         ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2650 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2651                                     AVCodecContext *avctx)
/* Parse the -force_key_frames time list `kf` (comma-separated times, or
 * "chapters[+offset]" to key-frame at chapter starts), rescale each time
 * into avctx->time_base, sort ascending, and store the result in
 * ost->forced_kf_pts / ost->forced_kf_count.  Fatal on allocation
 * failure. */
2654     int n = 1, i, size, index = 0;
/* first pass: count commas to size the pts array */
2657     for (p = kf; *p; p++)
2661     pts = av_malloc_array(size, sizeof(*pts));
2663         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* second pass: parse one entry per iteration, advancing past the comma */
2668     for (i = 0; i < n; i++) {
2669         char *next = strchr(p, ',');
/* "chapters" expands to one keyframe per chapter start, plus an optional
 * time offset appended after the keyword */
2674         if (!memcmp(p, "chapters", 8)) {
2676             AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array to hold one entry per chapter (overflow-checked) */
2679             if (avf->nb_chapters > INT_MAX - size ||
2680                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2682                 av_log(NULL, AV_LOG_FATAL,
2683                        "Could not allocate forced key frames array.\n");
2686             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2687             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2689             for (j = 0; j < avf->nb_chapters; j++) {
2690                 AVChapter *c = avf->chapters[j];
2691                 av_assert1(index < size);
2692                 pts[index++] = av_rescale_q(c->start, c->time_base,
2693                                             avctx->time_base) + t;
/* plain entry: a single timestamp */
2698             t = parse_time_or_die("force_key_frames", p, 1);
2699             av_assert1(index < size);
2700             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* keyframe times must be sorted so the encoder loop can scan linearly */
2707     av_assert0(index == size);
2708     qsort(pts, size, sizeof(*pts), compare_int64);
2709     ost->forced_kf_count = size;
2710     ost->forced_kf_pts   = pts;
2713 static void report_new_stream(int input_index, AVPacket *pkt)
/* Warn (once per stream) when a packet arrives for a stream that
 * appeared after the input was opened; such streams are not mapped and
 * their data is ignored. */
2715     InputFile *file = input_files[input_index];
2716     AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream index (or it existed at open time) */
2718     if (pkt->stream_index < file->nb_streams_warn)
2720     av_log(file->ctx, AV_LOG_WARNING,
2721            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2722            av_get_media_type_string(st->codec->codec_type),
2723            input_index, pkt->stream_index,
2724            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index warned about so far */
2725     file->nb_streams_warn = pkt->stream_index + 1;
2728 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Set the per-stream "encoder" metadata tag (e.g. "Lavc... libx264")
 * unless the user already provided one.  Under -bitexact (format or
 * codec level) the version string is replaced by a stable "Lavc "
 * prefix so output files are reproducible. */
2730     AVDictionaryEntry *e;
2732     uint8_t *encoder_string;
2733     int encoder_string_len;
2734     int format_flags = 0;
2735     int codec_flags = 0;
/* user-supplied encoder tag wins; do nothing */
2737     if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate the muxer's fflags option string to detect AVFMT_FLAG_BITEXACT */
2740     e = av_dict_get(of->opts, "fflags", NULL, 0);
2742         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2745         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* likewise the encoder's flags option for AV_CODEC_FLAG_BITEXACT */
2747     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2749         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2752         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2755     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2756     encoder_string     = av_mallocz(encoder_string_len);
2757     if (!encoder_string)
2760     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2761         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2763         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2764     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string */
2765     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2766                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2769 static int transcode_init(void)
/* One-time setup before the main transcode loop: resolve filtergraph
 * sources, compute encoding/stream-copy parameters for every output
 * stream, open all encoders and decoders, write output headers, and
 * print the stream mapping.  Returns 0 on success or a negative
 * AVERROR code. */
2771     int ret = 0, i, j, k;
2772     AVFormatContext *oc;
2775     char error[1024] = {0};
/* attach each complex-filtergraph output to its (single) input stream
 * so downstream code can treat it like a mapped stream */
2778     for (i = 0; i < nb_filtergraphs; i++) {
2779         FilterGraph *fg = filtergraphs[i];
2780         for (j = 0; j < fg->nb_outputs; j++) {
2781             OutputFilter *ofilter = fg->outputs[j];
2782             if (!ofilter->ost || ofilter->ost->source_index >= 0)
2784             if (fg->nb_inputs != 1)
2786             for (k = nb_input_streams-1; k >= 0 ; k--)
2787                 if (fg->inputs[0]->ist == input_streams[k])
2789             ofilter->ost->source_index = k;
2793     /* init framerate emulation */
2794     for (i = 0; i < nb_input_files; i++) {
2795         InputFile *ifile = input_files[i];
2796         if (ifile->rate_emu)
2797             for (j = 0; j < ifile->nb_streams; j++)
2798                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2801     /* for each output stream, we compute the right encoding parameters */
2802     for (i = 0; i < nb_output_streams; i++) {
2803         AVCodecContext *enc_ctx;
2804         AVCodecContext *dec_ctx = NULL;
2805         ost = output_streams[i];
2806         oc  = output_files[ost->file_index]->ctx;
2807         ist = get_input_stream(ost);
2809         if (ost->attachment_filename)
/* stream copy configures the muxer-side context directly; encoding uses
 * the dedicated encoder context */
2812         enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2815             dec_ctx = ist->dec_ctx;
2817             ost->st->disposition          = ist->st->disposition;
2818             enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2819             enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* if this is the only stream of its media type in the output file,
 * mark it as the default stream */
2821             for (j=0; j<oc->nb_streams; j++) {
2822                 AVStream *st = oc->streams[j];
2823                 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2826             if (j == oc->nb_streams)
2827                 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2828                     ost->st->disposition = AV_DISPOSITION_DEFAULT;
2831         if (ost->stream_copy) {
2833             uint64_t extra_size;
2835             av_assert0(ist && !ost->filter);
2837             extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2839             if (extra_size > INT_MAX) {
2840                 return AVERROR(EINVAL);
2843             /* if stream_copy is selected, no need to decode or encode */
2844             enc_ctx->codec_id   = dec_ctx->codec_id;
2845             enc_ctx->codec_type = dec_ctx->codec_type;
/* keep the input codec tag when the output container can represent it */
2847             if (!enc_ctx->codec_tag) {
2848                 unsigned int codec_tag;
2849                 if (!oc->oformat->codec_tag ||
2850                      av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2851                      !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2852                     enc_ctx->codec_tag = dec_ctx->codec_tag;
2855             enc_ctx->bit_rate       = dec_ctx->bit_rate;
2856             enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2857             enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2858             enc_ctx->field_order    = dec_ctx->field_order;
2859             if (dec_ctx->extradata_size) {
2860                 enc_ctx->extradata      = av_mallocz(extra_size);
2861                 if (!enc_ctx->extradata) {
2862                     return AVERROR(ENOMEM);
2864                 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2866             enc_ctx->extradata_size= dec_ctx->extradata_size;
2867             enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2869             enc_ctx->time_base = ist->st->time_base;
/* container-specific timebase fixups for stream copy follow; the copy_tb
 * option (<0 = auto) decides whether to prefer the decoder timebase */
2871              * Avi is a special case here because it supports variable fps but
2872              * having the fps and timebase differe significantly adds quite some
2875             if(!strcmp(oc->oformat->name, "avi")) {
2876                 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2877                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2878                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2879                                && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2881                     enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2882                     enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2883                     enc_ctx->ticks_per_frame = 2;
2884                 } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2885                                  && av_q2d(ist->st->time_base) < 1.0/500
2887                     enc_ctx->time_base = dec_ctx->time_base;
2888                     enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2889                     enc_ctx->time_base.den *= 2;
2890                     enc_ctx->ticks_per_frame = 2;
2892             } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2893                       && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2894                       && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2895                       && strcmp(oc->oformat->name, "f4v")
2897                 if(   copy_tb<0 && dec_ctx->time_base.den
2898                                 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2899                                 && av_q2d(ist->st->time_base) < 1.0/500
2901                     enc_ctx->time_base = dec_ctx->time_base;
2902                     enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode (tmcd) tracks keep the decoder timebase when it looks like a
 * sane frame duration (num/den within ~1/121 .. 1) */
2905             if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2906                 && dec_ctx->time_base.num < dec_ctx->time_base.den
2907                 && dec_ctx->time_base.num > 0
2908                 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2909                 enc_ctx->time_base = dec_ctx->time_base;
2912             if (!ost->frame_rate.num)
2913                 ost->frame_rate = ist->framerate;
2914             if(ost->frame_rate.num)
2915                 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2917             av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2918                         enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* deep-copy stream side data (skipping display matrix if rotation was
 * overridden on the command line) */
2920             if (ist->st->nb_side_data) {
2921                 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2922                                                       sizeof(*ist->st->side_data));
2923                 if (!ost->st->side_data)
2924                     return AVERROR(ENOMEM);
2926                 ost->st->nb_side_data = 0;
2927                 for (j = 0; j < ist->st->nb_side_data; j++) {
2928                     const AVPacketSideData *sd_src = &ist->st->side_data[j];
2929                     AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2931                     if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2934                     sd_dst->data = av_malloc(sd_src->size);
2936                         return AVERROR(ENOMEM);
2937                     memcpy(sd_dst->data, sd_src->data, sd_src->size);
2938                     sd_dst->size = sd_src->size;
2939                     sd_dst->type = sd_src->type;
2940                     ost->st->nb_side_data++;
2944             ost->parser = av_parser_init(enc_ctx->codec_id);
/* per-media-type parameter copies for the stream-copy path */
2946             switch (enc_ctx->codec_type) {
2947             case AVMEDIA_TYPE_AUDIO:
2948                 if (audio_volume != 256) {
2949                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2952                 enc_ctx->channel_layout     = dec_ctx->channel_layout;
2953                 enc_ctx->sample_rate        = dec_ctx->sample_rate;
2954                 enc_ctx->channels           = dec_ctx->channels;
2955                 enc_ctx->frame_size         = dec_ctx->frame_size;
2956                 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2957                 enc_ctx->block_align        = dec_ctx->block_align;
2958                 enc_ctx->initial_padding    = dec_ctx->delay;
2959 #if FF_API_AUDIOENC_DELAY
2960                 enc_ctx->delay              = dec_ctx->delay;
/* MP3/AC-3: bogus block_align values from some muxers must be cleared */
2962                 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2963                     enc_ctx->block_align= 0;
2964                 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2965                     enc_ctx->block_align= 0;
2967             case AVMEDIA_TYPE_VIDEO:
2968                 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
2969                 enc_ctx->width              = dec_ctx->width;
2970                 enc_ctx->height             = dec_ctx->height;
2971                 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
2972                 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2974                         av_mul_q(ost->frame_aspect_ratio,
2975                                  (AVRational){ enc_ctx->height, enc_ctx->width });
2976                     av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2977                            "with stream copy may produce invalid files\n");
2979                 else if (ist->st->sample_aspect_ratio.num)
2980                     sar = ist->st->sample_aspect_ratio;
2982                     sar = dec_ctx->sample_aspect_ratio;
2983                 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2984                 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2985                 ost->st->r_frame_rate = ist->st->r_frame_rate;
2987             case AVMEDIA_TYPE_SUBTITLE:
2988                 enc_ctx->width  = dec_ctx->width;
2989                 enc_ctx->height = dec_ctx->height;
2991             case AVMEDIA_TYPE_UNKNOWN:
2992             case AVMEDIA_TYPE_DATA:
2993             case AVMEDIA_TYPE_ATTACHMENT:
/* encoding path starts here: find the encoder, then derive parameters
 * from the (already configured) filtergraph */
3000                 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3002                 /* should only happen when a default codec is not present. */
3003                 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3004                          avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3005                 ret = AVERROR(EINVAL);
3009             set_encoder_id(output_files[ost->file_index], ost);
/* build a simple (1:1) filtergraph for mapped audio/video streams that
 * do not already have one */
3012                 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3013                  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3015                     fg = init_simple_filtergraph(ist, ost);
3016                     if (configure_filtergraph(fg)) {
3017                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* pick the output frame rate: user > buffersink > -r input > input
 * r_frame_rate > hardcoded 25 fps fallback */
3022             if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3023                 if (!ost->frame_rate.num)
3024                     ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3025                 if (ist && !ost->frame_rate.num)
3026                     ost->frame_rate = ist->framerate;
3027                 if (ist && !ost->frame_rate.num)
3028                     ost->frame_rate = ist->st->r_frame_rate;
3029                 if (ist && !ost->frame_rate.num) {
3030                     ost->frame_rate = (AVRational){25, 1};
3031                     av_log(NULL, AV_LOG_WARNING,
3033                            "about the input framerate is available. Falling "
3034                            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3035                            "if you want a different framerate.\n",
3036                            ost->file_index, ost->index);
3038 //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3039                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3040                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3041                     ost->frame_rate = ost->enc->supported_framerates[idx];
3043                 // reduce frame rate for mpeg4 to be within the spec limits
3044                 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3045                     av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3046                               ost->frame_rate.num, ost->frame_rate.den, 65535);
/* derive encoder parameters from the filtergraph's output link */
3050             switch (enc_ctx->codec_type) {
3051             case AVMEDIA_TYPE_AUDIO:
3052                 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3053                 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3054                 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3055                 enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3056                 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3058             case AVMEDIA_TYPE_VIDEO:
3059                 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3060                 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3061                     enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3062                 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3063                    && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3064                     av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3065                                                "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3067                 for (j = 0; j < ost->forced_kf_count; j++)
3068                     ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3070                                                          enc_ctx->time_base);
3072                 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3073                 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3074                 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3075                     ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3076                     av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3077                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
3078                 if (!strncmp(ost->enc->name, "libx264", 7) &&
3079                     enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3080                     ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3081                     av_log(NULL, AV_LOG_WARNING,
3082                            "No pixel format specified, %s for H.264 encoding chosen.\n"
3083                            "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3084                            av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3085                 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3086                     enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3087                     ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3088                     av_log(NULL, AV_LOG_WARNING,
3089                            "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3090                            "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3091                            av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3092                 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3094                 ost->st->avg_frame_rate = ost->frame_rate;
/* bits_per_raw_sample is only meaningful if geometry/format unchanged */
3097                     enc_ctx->width   != dec_ctx->width  ||
3098                     enc_ctx->height  != dec_ctx->height ||
3099                     enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3100                     enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" installs an expression evaluated per frame,
 * "source" keeps input keyframes, anything else is a static time list */
3103                 if (ost->forced_keyframes) {
3104                     if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3105                         ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3106                                             forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3108                             av_log(NULL, AV_LOG_ERROR,
3109                                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3112                         ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3113                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3114                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3115                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3117                     // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3118                     // parse it only for static kf timings
3119                     } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3120                         parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3124             case AVMEDIA_TYPE_SUBTITLE:
3125                 enc_ctx->time_base = (AVRational){1, 1000};
3126                 if (!enc_ctx->width) {
3127                     enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3128                     enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3131             case AVMEDIA_TYPE_DATA:
/* apply the -disposition option string via a local AVOption flags table */
3139         if (ost->disposition) {
3140             static const AVOption opts[] = {
3141                 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3142                 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3143                 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3144                 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3145                 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3146                 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3147                 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3148                 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3149                 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3150                 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3151                 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3152                 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3153                 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3154                 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3157             static const AVClass class = {
3159                 .item_name  = av_default_item_name,
3161                 .version    = LIBAVUTIL_VERSION_INT,
3163             const AVClass *pclass = &class;
3165             ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3171     /* open each encoder */
3172     for (i = 0; i < nb_output_streams; i++) {
3173         ret = init_output_stream(output_streams[i], error, sizeof(error));
3178     /* init input streams */
3179     for (i = 0; i < nb_input_streams; i++)
3180         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* decoder open failed: close all encoders before bailing out */
3181             for (i = 0; i < nb_output_streams; i++) {
3182                 ost = output_streams[i];
3183                 avcodec_close(ost->enc_ctx);
3188     /* discard unused programs */
3189     for (i = 0; i < nb_input_files; i++) {
3190         InputFile *ifile = input_files[i];
3191         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3192             AVProgram *p = ifile->ctx->programs[j];
3193             int discard  = AVDISCARD_ALL;
/* keep the program if any of its streams is actually used */
3195             for (k = 0; k < p->nb_stream_indexes; k++)
3196                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3197                     discard = AVDISCARD_DEFAULT;
3200             p->discard = discard;
3204     /* open files and write file headers */
3205     for (i = 0; i < nb_output_files; i++) {
3206         oc = output_files[i]->ctx;
3207         oc->interrupt_callback = int_cb;
3208         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3209             snprintf(error, sizeof(error),
3210                      "Could not write header for output file #%d "
3211                      "(incorrect codec parameters ?): %s",
3212                      i, av_err2str(ret));
3213             ret = AVERROR(EINVAL);
3216 //         assert_avoptions(output_files[i]->opts);
3217         if (strcmp(oc->oformat->name, "rtp")) {
3223     /* dump the file output parameters - cannot be done before in case
3225     for (i = 0; i < nb_output_files; i++) {
3226         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3229     /* dump the stream mapping */
3230     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3231     for (i = 0; i < nb_input_streams; i++) {
3232         ist = input_streams[i];
3234         for (j = 0; j < ist->nb_filters; j++) {
3235             if (ist->filters[j]->graph->graph_desc) {
3236                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3237                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3238                        ist->filters[j]->name);
3239                 if (nb_filtergraphs > 1)
3240                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3241                 av_log(NULL, AV_LOG_INFO, "\n");
3246     for (i = 0; i < nb_output_streams; i++) {
3247         ost = output_streams[i];
3249         if (ost->attachment_filename) {
3250             /* an attached file */
3251             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3252                    ost->attachment_filename, ost->file_index, ost->index);
3256         if (ost->filter && ost->filter->graph->graph_desc) {
3257             /* output from a complex graph */
3258             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3259             if (nb_filtergraphs > 1)
3260                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3262             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3263                    ost->index, ost->enc ? ost->enc->name : "?");
3267         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3268                input_streams[ost->source_index]->file_index,
3269                input_streams[ost->source_index]->st->index,
3272         if (ost->sync_ist != input_streams[ost->source_index])
3273             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3274                    ost->sync_ist->file_index,
3275                    ost->sync_ist->st->index);
3276         if (ost->stream_copy)
3277             av_log(NULL, AV_LOG_INFO, " (copy)");
/* encoding: show decoder/encoder names, collapsing to "native" when the
 * implementation name equals the generic codec name */
3279             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3280             const AVCodec *out_codec   = ost->enc;
3281             const char *decoder_name   = "?";
3282             const char *in_codec_name  = "?";
3283             const char *encoder_name   = "?";
3284             const char *out_codec_name = "?";
3285             const AVCodecDescriptor *desc;
3288                 decoder_name  = in_codec->name;
3289                 desc = avcodec_descriptor_get(in_codec->id);
3291                     in_codec_name = desc->name;
3292                 if (!strcmp(decoder_name, in_codec_name))
3293                     decoder_name  = "native";
3297                 encoder_name   = out_codec->name;
3298                 desc = avcodec_descriptor_get(out_codec->id);
3300                     out_codec_name = desc->name;
3301                 if (!strcmp(encoder_name, out_codec_name))
3302                     encoder_name   = "native";
3305             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3306                    in_codec_name, decoder_name,
3307                    out_codec_name, encoder_name);
3309         av_log(NULL, AV_LOG_INFO, "\n");
3313         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3317     if (sdp_filename || want_sdp) {
3321     transcode_init_done = 1;
3326 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3327 static int need_output(void)
3331     for (i = 0; i < nb_output_streams; i++) {
3332         OutputStream *ost    = output_streams[i];
3333         OutputFile *of       = output_files[ost->file_index];
3334         AVFormatContext *os  = output_files[ost->file_index]->ctx;
/* a stream is done when it is finished or its file hit -fs limit */
3336         if (ost->finished ||
3337             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* hitting -frames closes every stream of the same output file */
3339         if (ost->frame_number >= ost->max_frames) {
3341             for (j = 0; j < of->ctx->nb_streams; j++)
3342                 close_output_stream(output_streams[of->ost_index + j]);
3353  * Select the output stream to process.
3355  * @return selected output stream, or NULL if none available
3357 static OutputStream *choose_output(void)
3360     int64_t opts_min = INT64_MAX;
3361     OutputStream *ost_min = NULL;
/* pick the unfinished stream with the smallest muxed DTS so the outputs
 * advance roughly in lockstep */
3363     for (i = 0; i < nb_output_streams; i++) {
3364         OutputStream *ost = output_streams[i];
3365         int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3367         if (!ost->finished && opts < opts_min) {
/* an "unavailable" stream still lowers opts_min but yields NULL so the
 * caller waits instead of processing another stream */
3369             ost_min  = ost->unavailable ? NULL : ost;
/* Poll stdin for interactive single-key commands (q, +, -, s, c/C, d/D, h, ?).
 * Returns AVERROR_EXIT to request shutdown, otherwise continues transcoding.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3375 static int check_keyboard_interaction(int64_t cur_time)
3378 static int64_t last_time;
/* A pending signal (SIGINT/SIGTERM counter) also terminates the loop. */
3379 if (received_nb_signals)
3380 return AVERROR_EXIT;
3381 /* read_key() returns 0 on EOF */
/* Rate-limit key polling to once per 100 ms; never poll in daemon mode. */
3382 if(cur_time - last_time >= 100000 && !run_as_daemon){
3384 last_time = cur_time;
3388 return AVERROR_EXIT;
/* Verbosity up/down in steps of 10 (one log level), QP histogram toggle. */
3389 if (key == '+') av_log_set_level(av_log_get_level()+10);
3390 if (key == '-') av_log_set_level(av_log_get_level()-10);
3391 if (key == 's') qp_hist ^= 1;
/* 'h' cycles packet dumping: off -> dump -> dump+hex -> off. */
3394 do_hex_dump = do_pkt_dump = 0;
3395 } else if(do_pkt_dump){
3399 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' queues/sends to all. */
3401 if (key == 'c' || key == 'C'){
3402 char buf[4096], target[64], command[256], arg[256] = {0};
3405 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one interactive line, bounded by the buffer size. */
3407 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
/* Expect at least: target, time, command; arg is optional. */
3412 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3413 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3414 target, time, command, arg);
3415 for (i = 0; i < nb_filtergraphs; i++) {
3416 FilterGraph *fg = filtergraphs[i];
3419 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3420 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3421 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3422 } else if (key == 'c') {
/* NOTE(review): "Queing" typo in these user-facing messages (should be
 * "Queuing"); left untouched here since a doc pass must not alter strings. */
3423 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3424 ret = AVERROR_PATCHWELCOME;
3426 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3428 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3433 av_log(NULL, AV_LOG_ERROR,
3434 "Parse error, at least 3 arguments were expected, "
3435 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D' cycle decoder debug flags; 'd' shifts through modes, skipping
 * flags marked unsupported below. */
3438 if (key == 'd' || key == 'D'){
3441 debug = input_streams[0]->st->codec->debug<<1;
3442 if(!debug) debug = 1;
3443 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
/* 'D': read an explicit numeric debug value from the terminal. */
3449 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3453 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3454 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3456 for(i=0;i<nb_input_streams;i++) {
3457 input_streams[i]->st->codec->debug = debug;
3459 for(i=0;i<nb_output_streams;i++) {
3460 OutputStream *ost = output_streams[i];
3461 ost->enc_ctx->debug = debug;
3463 if(debug) av_log_set_level(AV_LOG_DEBUG);
3464 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys, presumably — elided here) print this help text. */
3467 fprintf(stderr, "key function\n"
3468 "? show this help\n"
3469 "+ increase verbosity\n"
3470 "- decrease verbosity\n"
3471 "c Send command to first matching filter supporting it\n"
3472 "C Send/Que command to all matching filters\n"
3473 "D cycle through available debug modes\n"
3474 "h dump packets/hex press to cycle through the 3 states\n"
3476 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a message queue.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3483 static void *input_thread(void *arg)
/* Non-blocking sends are used for files flagged non_blocking (see
 * init_input_threads); otherwise sends block until the queue has room. */
3486 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3491 ret = av_read_frame(f->ctx, &pkt);
3493 if (ret == AVERROR(EAGAIN)) {
/* Demuxer error/EOF: propagate the status to the receiving side and stop. */
3498 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Duplicate packet data so it survives until the main thread consumes it. */
3501 av_dup_packet(&pkt);
3502 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: warn once, then retry blocking
 * (presumably — the elided lines between 3503 and 3505 likely clear flags). */
3503 if (flags && ret == AVERROR(EAGAIN)) {
3505 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3506 av_log(f->ctx, AV_LOG_WARNING,
3507 "Thread message queue blocking; consider raising the "
3508 "thread_queue_size option (current value: %d)\n",
3509 f->thread_queue_size);
/* Send failed: report (unless plain EOF), drop the packet, signal receiver. */
3512 if (ret != AVERROR_EOF)
3513 av_log(f->ctx, AV_LOG_ERROR,
3514 "Unable to send packet to main thread: %s\n",
3516 av_free_packet(&pkt);
3517 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all demuxer threads, draining and freeing queued packets.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3525 static void free_input_threads(void)
3529 for (i = 0; i < nb_input_files; i++) {
3530 InputFile *f = input_files[i];
/* Skip files that never got a thread/queue (e.g. single-input case). */
3533 if (!f || !f->in_thread_queue)
/* Make the thread's next send fail with EOF, then drain pending packets so
 * the thread is never stuck blocking on a full queue. */
3535 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3536 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3537 av_free_packet(&pkt);
3539 pthread_join(f->thread, NULL);
3541 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3545 static int init_input_threads(void)
/* With a single input no threading is needed (get_input_packet reads directly). */
3549 if (nb_input_files == 1)
3552 for (i = 0; i < nb_input_files; i++) {
3553 InputFile *f = input_files[i];
/* Non-seekable (live) inputs — except lavfi — get non-blocking queue sends
 * so a slow consumer does not stall the capture. */
3555 if (f->ctx->pb ? !f->ctx->pb->seekable :
3556 strcmp(f->ctx->iformat->name, "lavfi"))
3557 f->non_blocking = 1;
3558 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3559 f->thread_queue_size, sizeof(AVPacket));
/* On thread creation failure, free the queue we just allocated. */
3563 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3564 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3565 av_thread_message_queue_free(&f->in_thread_queue);
3566 return AVERROR(ret);
/* Multi-threaded variant: receive one packet from the file's demuxer-thread
 * queue; the elided condition presumably selects NONBLOCK for non-blocking
 * files — confirm against full source. */
3572 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3574 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3576 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re style rate limiting,
 * via the thread queue when multiple inputs exist, else reading directly.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3580 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Rate emulation: if any stream's wall-clock position is ahead of real time,
 * report EAGAIN so the caller throttles (guarded by an elided rate_emu check). */
3584 for (i = 0; i < f->nb_streams; i++) {
3585 InputStream *ist = input_streams[f->ist_index + i];
3586 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3587 int64_t now = av_gettime_relative() - ist->start;
3589 return AVERROR(EAGAIN);
/* Threaded path only when more than one input file is open. */
3594 if (nb_input_files > 1)
3595 return get_input_packet_mt(f, pkt);
3597 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently flagged unavailable,
 * i.e. some input reported EAGAIN this iteration. */
3600 static int got_eagain(void)
3603 for (i = 0; i < nb_output_streams; i++)
3604 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before the
 * next scheduling pass of the transcode loop. */
3609 static void reset_eagain(void)
3612 for (i = 0; i < nb_input_files; i++)
3613 input_files[i]->eagain = 0;
3614 for (i = 0; i < nb_output_streams; i++)
3615 output_streams[i]->unavailable = 0;
3618 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts; updates *duration to the larger value and returns the time
 * base the stored duration is now expressed in. Used by seek_to_start when
 * looping inputs. NOTE(review): listing is elided between these lines. */
3619 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3620 AVRational time_base)
3626 return tmp_time_base;
3629 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3632 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek, flush decoders,
 * and accumulate the file's total duration so looped timestamps can be
 * offset. NOTE(review): listing is elided — intervening lines are omitted. */
3638 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3641 AVCodecContext *avctx;
3642 int i, ret, has_audio = 0;
3643 int64_t duration = 0;
3645 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush every active decoder and detect audio streams with
 * decoded samples (audio defines the last-frame length exactly). */
3649 for (i = 0; i < ifile->nb_streams; i++) {
3650 ist = input_streams[ifile->ist_index + i];
3651 avctx = ist->dec_ctx;
3654 if (ist->decoding_needed) {
/* NULL packet = flush; then reset the decoder's internal buffers. */
3655 process_input_packet(ist, NULL, 1);
3656 avcodec_flush_buffers(avctx);
3659 /* duration is the length of the last frame in a stream
3660 * when audio stream is present we don't care about
3661 * last video frame length because it's not defined exactly */
3662 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's duration = last-frame length plus
 * (max_pts - min_pts), and keep the maximum across streams. */
3666 for (i = 0; i < ifile->nb_streams; i++) {
3667 ist = input_streams[ifile->ist_index + i];
3668 avctx = ist->dec_ctx;
/* Audio: last frame length derived from the decoded sample count. */
3671 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3672 AVRational sample_rate = {1, avctx->sample_rate};
3674 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: prefer forced -r framerate, then container avg_frame_rate,
 * else fall back to 1 time-base unit. */
3678 if (ist->framerate.num) {
3679 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3680 } else if (ist->st->avg_frame_rate.num) {
3681 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3682 } else duration = 1;
/* First stream to contribute establishes the file's duration time base. */
3684 if (!ifile->duration)
3685 ifile->time_base = ist->st->time_base;
3686 /* the total duration of the stream, max_pts - min_pts is
3687 * the duration of the stream without the last frame */
3688 duration += ist->max_pts - ist->min_pts;
3689 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3700 * - 0 -- one packet was read and processed
3701 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3702 * this function should be called again
3703 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (wrap correction, ts_offset, ts_scale, discontinuity handling, loop
 * offset), then hand it to process_input_packet().
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3705 static int process_input(int file_index)
3707 InputFile *ifile = input_files[file_index];
3708 AVFormatContext *is;
3715 ret = get_input_packet(ifile, &pkt);
3717 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on EOF (or error) with loops remaining, rewind and retry. */
3721 if ((ret < 0) && (ifile->loop > 1)) {
3722 if ((ret = seek_to_start(ifile, is)) < 0)
3724 ret = get_input_packet(ifile, &pkt);
3727 if (ret != AVERROR_EOF) {
3728 print_error(is->filename, ret);
/* True EOF: flush all decoders of this file and finish stream-copy /
 * subtitle outputs that do not pass through lavfi. */
3733 for (i = 0; i < ifile->nb_streams; i++) {
3734 ist = input_streams[ifile->ist_index + i];
3735 if (ist->decoding_needed) {
3736 ret = process_input_packet(ist, NULL, 0);
3741 /* mark all outputs that don't go through lavfi as finished */
3742 for (j = 0; j < nb_output_streams; j++) {
3743 OutputStream *ost = output_streams[j];
3745 if (ost->source_index == ifile->ist_index + i &&
3746 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3747 finish_output_stream(ost);
/* Report EAGAIN (not EOF) so the caller re-evaluates remaining files. */
3751 ifile->eof_reached = 1;
3752 return AVERROR(EAGAIN);
3758 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3759 is->streams[pkt.stream_index]);
3761 /* the following test is needed in case new streams appear
3762 dynamically in stream : we ignore them */
3763 if (pkt.stream_index >= ifile->nb_streams) {
3764 report_new_stream(file_index, &pkt);
3765 goto discard_packet;
3768 ist = input_streams[ifile->ist_index + pkt.stream_index];
3770 ist->data_size += pkt.size;
3774 goto discard_packet;
/* Debug-ts trace of raw demuxer timestamps before any adjustment. */
3777 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3778 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3779 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3780 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3781 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3782 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3783 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3784 av_ts2str(input_files[ist->file_index]->ts_offset),
3785 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
3788 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3789 int64_t stime, stime2;
3790 // Correcting starttime based on the enabled streams
3791 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3792 // so we instead do it here as part of discontinuity handling
3793 if ( ist->next_dts == AV_NOPTS_VALUE
3794 && ifile->ts_offset == -is->start_time
3795 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3796 int64_t new_start_time = INT64_MAX;
/* Recompute start time from only the non-discarded streams. */
3797 for (i=0; i<is->nb_streams; i++) {
3798 AVStream *st = is->streams[i];
3799 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3801 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3803 if (new_start_time > is->start_time) {
3804 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3805 ifile->ts_offset = -new_start_time;
3809 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3810 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3811 ist->wrap_correction_done = 1;
/* Undo a wrap if the timestamp is more than half the wrap range past start;
 * keep wrap_correction_done clear until both dts and pts are in range. */
3813 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3814 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3815 ist->wrap_correction_done = 0;
3817 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3818 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3819 ist->wrap_correction_done = 0;
3823 /* add the stream-global side data to the first packet */
3824 if (ist->nb_packets == 1) {
3825 if (ist->st->nb_side_data)
3826 av_packet_split_side_data(&pkt);
3827 for (i = 0; i < ist->st->nb_side_data; i++) {
3828 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries, and display-matrix data when
 * autorotate will handle rotation itself. */
3831 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3833 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3836 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3840 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file ts_offset (input -ss / start-time correction). */
3844 if (pkt.dts != AV_NOPTS_VALUE)
3845 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3846 if (pkt.pts != AV_NOPTS_VALUE)
3847 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* Apply -itsscale timestamp scaling. */
3849 if (pkt.pts != AV_NOPTS_VALUE)
3850 pkt.pts *= ist->ts_scale;
3851 if (pkt.dts != AV_NOPTS_VALUE)
3852 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity: first dts of an A/V stream in a TS_DISCONT
 * format compared against the file's last seen timestamp. */
3854 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3855 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3856 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3857 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3858 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3859 int64_t delta = pkt_dts - ifile->last_ts;
3860 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3861 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3862 ifile->ts_offset -= delta;
3863 av_log(NULL, AV_LOG_DEBUG,
3864 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3865 delta, ifile->ts_offset);
3866 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3867 if (pkt.pts != AV_NOPTS_VALUE)
3868 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and keep
 * the running min/max pts for the next loop's duration computation. */
3872 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3873 if (pkt.pts != AV_NOPTS_VALUE) {
3874 pkt.pts += duration;
3875 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3876 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3879 if (pkt.dts != AV_NOPTS_VALUE)
3880 pkt.dts += duration;
/* Intra-stream discontinuity: dts vs. the predicted next_dts. */
3882 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3883 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3884 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3886 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3887 int64_t delta = pkt_dts - ist->next_dts;
3888 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* Large jump (or dts falling behind current pts/dts): absorb it into
 * ts_offset and shift this packet back accordingly. */
3889 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3890 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3891 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3892 ifile->ts_offset -= delta;
3893 av_log(NULL, AV_LOG_DEBUG,
3894 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3895 delta, ifile->ts_offset);
3896 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3897 if (pkt.pts != AV_NOPTS_VALUE)
3898 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: beyond the error threshold the timestamps are
 * considered invalid and dropped rather than corrected. */
3901 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3902 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3903 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3904 pkt.dts = AV_NOPTS_VALUE;
3906 if (pkt.pts != AV_NOPTS_VALUE){
3907 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3908 delta = pkt_pts - ist->next_dts;
3909 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3910 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3911 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3912 pkt.pts = AV_NOPTS_VALUE;
/* Remember the file's last dts for the inter-stream check above. */
3918 if (pkt.dts != AV_NOPTS_VALUE)
3919 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Debug-ts trace after all adjustments. */
3922 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3923 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3924 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3925 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3926 av_ts2str(input_files[ist->file_index]->ts_offset),
3927 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3930 sub2video_heartbeat(ist, pkt.pts);
3932 process_input_packet(ist, &pkt, 0);
3935 av_free_packet(&pkt);
3941 * Perform a step of transcoding for the specified filter graph.
3943 * @param[in] graph filter graph to consider
3944 * @param[out] best_ist input stream where a frame would allow to continue
3945 * @return 0 for success, <0 for error
/* NOTE(review): listing is elided — intervening source lines are omitted. */
3947 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3950 int nb_requests, nb_requests_max = 0;
3951 InputFilter *ifilter;
/* Ask the graph for output on its oldest sink; success means a frame can be
 * reaped immediately. */
3955 ret = avfilter_graph_request_oldest(graph->graph);
3957 return reap_filters(0);
/* EOF from the graph: flush remaining frames and close all its outputs. */
3959 if (ret == AVERROR_EOF) {
3960 ret = reap_filters(1);
3961 for (i = 0; i < graph->nb_outputs; i++)
3962 close_output_stream(graph->outputs[i]->ost);
3965 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc has failed the most requests —
 * feeding it is most likely to unblock the graph. */
3968 for (i = 0; i < graph->nb_inputs; i++) {
3969 ifilter = graph->inputs[i];
/* Ignore inputs whose file is stalled or already at EOF. */
3971 if (input_files[ist->file_index]->eagain ||
3972 input_files[ist->file_index]->eof_reached)
3974 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3975 if (nb_requests > nb_requests_max) {
3976 nb_requests_max = nb_requests;
/* No feedable input found (presumably, given the elided branch): mark the
 * graph's outputs unavailable for this scheduling round. */
3982 for (i = 0; i < graph->nb_outputs; i++)
3983 graph->outputs[i]->ost->unavailable = 1;
3989 * Run a single step of transcoding.
3991 * @return 0 for success, <0 for error
/* Chooses an output stream, determines which input it needs (directly or via
 * its filter graph), processes one input packet, then reaps filter output.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
3993 static int transcode_step(void)
3999 ost = choose_output();
/* No selectable output and nothing left to read: we are done. */
4006 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered stream: let the graph tell us which input stream to feed. */
4011 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered stream: its source input stream index must be valid. */
4016 av_assert0(ost->source_index >= 0);
4017 ist = input_streams[ost->source_index];
4020 ret = process_input(ist->file_index);
4021 if (ret == AVERROR(EAGAIN)) {
/* File-level EAGAIN: mark this output unavailable so choose_output skips it. */
4022 if (input_files[ist->file_index]->eagain)
4023 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4028 return ret == AVERROR_EOF ? 0 : ret;
4030 return reap_filters(0);
4034 * The following code is the main loop of the file converter
/* Drives the whole conversion: init, main loop (keyboard, need_output,
 * transcode_step, progress report), then flush, trailer, close, cleanup.
 * NOTE(review): listing is elided — intervening source lines are omitted. */
4036 static int transcode(void)
4039 AVFormatContext *os;
4042 int64_t timer_start;
4044 ret = transcode_init();
4048 if (stdin_interaction) {
4049 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4052 timer_start = av_gettime_relative();
/* Demuxer threads (HAVE_PTHREADS build, presumably guarded by an elided #if). */
4055 if ((ret = init_input_threads()) < 0)
4059 while (!received_sigterm) {
4060 int64_t cur_time= av_gettime_relative();
4062 /* if 'q' pressed, exits */
4063 if (stdin_interaction)
4064 if (check_keyboard_interaction(cur_time) < 0)
4067 /* check if there's any stream where output is still needed */
4068 if (!need_output()) {
4069 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4073 ret = transcode_step();
/* EOF/EAGAIN from a step are not fatal; anything else aborts with a message. */
4075 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4079 av_strerror(ret, errbuf, sizeof(errbuf));
4081 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4086 /* dump report by using the output first video and audio streams */
4087 print_report(0, timer_start, cur_time);
4090 free_input_threads();
4093 /* at the end of stream, we must flush the decoder buffers */
4094 for (i = 0; i < nb_input_streams; i++) {
4095 ist = input_streams[i];
4096 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4097 process_input_packet(ist, NULL, 0);
4104 /* write the trailer if needed and close file */
4105 for (i = 0; i < nb_output_files; i++) {
4106 os = output_files[i]->ctx;
4107 av_write_trailer(os);
4110 /* dump report by using the first video and audio streams */
4111 print_report(1, timer_start, av_gettime_relative());
4113 /* close each encoder */
4114 for (i = 0; i < nb_output_streams; i++) {
4115 ost = output_streams[i];
4116 if (ost->encoding_needed) {
/* Free two-pass stats read buffer before the encoder is torn down. */
4117 av_freep(&ost->enc_ctx->stats_in);
4121 /* close each decoder */
4122 for (i = 0; i < nb_input_streams; i++) {
4123 ist = input_streams[i];
4124 if (ist->decoding_needed) {
4125 avcodec_close(ist->dec_ctx);
4126 if (ist->hwaccel_uninit)
4127 ist->hwaccel_uninit(ist->dec_ctx);
/* Error path (elided label, presumably "fail:"): threads freed again here. */
4136 free_input_threads();
4139 if (output_streams) {
4140 for (i = 0; i < nb_output_streams; i++) {
4141 ost = output_streams[i];
/* Close the two-pass log file and free all per-stream option storage. */
4144 fclose(ost->logfile);
4145 ost->logfile = NULL;
4147 av_freep(&ost->forced_kf_pts);
4148 av_freep(&ost->apad);
4149 av_freep(&ost->disposition);
4150 av_dict_free(&ost->encoder_opts);
4151 av_dict_free(&ost->sws_dict);
4152 av_dict_free(&ost->swr_opts);
4153 av_dict_free(&ost->resample_opts);
4154 av_dict_free(&ost->bsf_args);
/* Return the process's consumed user CPU time in microseconds; falls back to
 * wall-clock time where neither getrusage nor GetProcessTimes is available.
 * NOTE(review): the #if HAVE_GETRUSAGE branch line is elided in this listing. */
4162 static int64_t getutime(void)
4165 struct rusage rusage;
4167 getrusage(RUSAGE_SELF, &rusage);
4168 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4169 #elif HAVE_GETPROCESSTIMES
4171 FILETIME c, e, k, u;
4172 proc = GetCurrentProcess();
4173 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 to get microseconds. */
4174 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4176 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (0 where unsupported —
 * the fallback branch is elided in this listing). */
4180 static int64_t getmaxrss(void)
4182 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4183 struct rusage rusage;
4184 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes.
 * NOTE(review): on macOS ru_maxrss is already bytes — behavior kept as-is. */
4185 return (int64_t)rusage.ru_maxrss * 1024;
4186 #elif HAVE_GETPROCESSMEMORYINFO
4188 PROCESS_MEMORY_COUNTERS memcounters;
4189 proc = GetCurrentProcess();
4190 memcounters.cb = sizeof(memcounters);
4191 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4192 return memcounters.PeakPagefileUsage;
/* Empty av_log callback installed in -d (daemon) mode to suppress all log
 * output (body elided in this listing). */
4198 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4202 int main(int argc, char **argv)
4207 register_exit(ffmpeg_cleanup);
4209 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4211 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4212 parse_loglevel(argc, argv, options);
4214 if(argc>1 && !strcmp(argv[1], "-d")){
4216 av_log_set_callback(log_callback_null);
4221 avcodec_register_all();
4223 avdevice_register_all();
4225 avfilter_register_all();
4227 avformat_network_init();
4229 show_banner(argc, argv, options);
4233 /* parse options and open all input/output files */
4234 ret = ffmpeg_parse_options(argc, argv);
4238 if (nb_output_files <= 0 && nb_input_files == 0) {
4240 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4244 /* file converter / grab */
4245 if (nb_output_files <= 0) {
4246 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4250 // if (nb_input_files == 0) {
4251 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4255 current_time = ti = getutime();
4256 if (transcode() < 0)
4258 ti = getutime() - ti;
4260 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4262 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4263 decode_error_stat[0], decode_error_stat[1]);
4264 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4267 exit_program(received_nb_signals ? 255 : main_return_code);
4268 return main_return_code;