2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
135 static uint8_t *subtitle_out;
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a freshly (re)allocated, fully transparent
 * RGB32 canvas. Dimensions come from the decoder when it has them, otherwise
 * from the sub2video w/h recorded on the input stream.
 * NOTE(review): intermediate lines (local declarations, braces, returns)
 * appear elided in this extraction — verify against the full source. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before resizing/reallocating. */
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero-fill plane 0: RGB32 all-zero means fully transparent black. */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas at (r->x, r->y).
 * The rect's pixels are palette indices (pict.data[0]) resolved through the
 * 32-bit palette in pict.data[1]. Non-bitmap or out-of-bounds rects are
 * warned about and — presumably, the early-return lines are elided here —
 * skipped. NOTE(review): several lines are missing from this extraction. */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
/* Palette lookup per pixel: index byte -> 32-bit color. */
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Stamp the current sub2video canvas with pts, remember that pts as
 * last_pts, and push a reference to the frame into every filtergraph input
 * fed by this stream (KEEP_REF so the same frame can be re-sent later by
 * the heartbeat; PUSH to make the filtergraph process it immediately). */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it into the
 * filtergraphs. With sub != NULL, the display window [pts, end_pts] is the
 * subtitle's start/end display times rescaled from AV_TIME_BASE to the
 * stream time base. With sub == NULL (elided branch), the previously stored
 * end_pts is reused — this is the "clear the canvas" path used by the
 * heartbeat and flush. NOTE(review): the sub==NULL branch and some
 * declarations are elided in this extraction. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* Composite every rectangle of the subtitle onto the canvas. */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
/* Remember when this rendering stops being valid. */
256 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams "alive": whenever a frame is read from a file,
 * re-send the current subtitle canvas on every sub2video stream of the same
 * file so that filters (e.g. overlay) waiting on the subtitle input do not
 * stall the graph. NOTE(review): continue statements and the condition
 * guarding the final push appear elided in this extraction. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Canvas expired or never drawn: re-render (NULL -> blank/clear path). */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: render a final (clearing)
 * update if one is still pending, then feed NULL into each buffersrc to
 * signal EOF to the filtergraphs. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put the tty back into the mode
 * saved in oldtty. NOTE(review): the guard (presumably on restore_tty) and
 * surrounding lines are elided in this extraction — confirm in full source. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many have arrived.
 * Uses write(2) rather than stdio because only async-signal-safe functions
 * may be called here; after more than 3 signals the process is hard-exited
 * (exit call elided in this extraction). */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code understands. For close/logoff/
 * shutdown events the process is killed as soon as this callback returns,
 * so it spins until the main thread has finished cleanup (ffmpeg_exited).
 * NOTE(review): case labels/returns between the visible lines are elided. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init() (function header elided in this extraction):
 * puts the tty into a raw-ish mode so single keypresses can be read, then
 * installs signal handlers for clean shutdown, and on Windows registers the
 * console control handler. NOTE(review): guards and save-state lines are
 * missing here — do not infer structure beyond the visible statements. */
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Fragment of read_key(): POSIX path polls stdin with a zero-timeout
 * select(); the Windows path peeks a named pipe when stdin is not a console
 * (GUI launchers). NOTE(review): most of the body — fd_set setup, reads,
 * returns — is elided in this extraction. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes: that distinguishes GUI/pipe stdin. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once more signals have been
 * received than the init phase accounts for (before transcode_init_done is
 * set, a single signal does not yet interrupt). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, run once at exit: frees filtergraphs, output files and
 * streams, input files and streams, and all per-stream state, then reports
 * how the run ended. `ret` is the process exit status being propagated.
 * NOTE(review): many lines (loop/if braces, benchmark guard, final flag
 * setting) are elided in this extraction — the visible ordering is what the
 * full source does, but do not assume the gaps are empty. */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free the graph, then each input/output wrapper --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- output files: close the AVIO context for file-based muxers, then
 *     free the format context and per-file options --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, cached frames, parser,
 *     forced-keyframe state, encoder context --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 avcodec_free_context(&ost->enc_ctx);
532 av_freep(&output_streams[i]);
/* --- input side: stop reader threads before closing demuxers --- */
535 free_input_threads();
537 for (i = 0; i < nb_input_files; i++) {
538 avformat_close_input(&input_files[i]->ctx);
539 av_freep(&input_files[i]);
541 for (i = 0; i < nb_input_streams; i++) {
542 InputStream *ist = input_streams[i];
544 av_frame_free(&ist->decoded_frame);
545 av_frame_free(&ist->filter_frame);
546 av_dict_free(&ist->decoder_opts);
547 avsubtitle_free(&ist->prev_sub.subtitle);
548 av_frame_free(&ist->sub2video.frame);
549 av_freep(&ist->filters);
550 av_freep(&ist->hwaccel_device);
552 avcodec_free_context(&ist->dec_ctx);
554 av_freep(&input_streams[i]);
559 av_freep(&vstats_filename);
561 av_freep(&input_streams);
562 av_freep(&input_files);
563 av_freep(&output_streams);
564 av_freep(&output_files);
568 avformat_network_deinit();
/* Final status report: a signal-triggered exit is "normal". */
570 if (received_sigterm) {
571 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
572 (int) received_sigterm);
573 } else if (ret && transcode_init_done) {
574 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options that were consumed by one component from the
 * dictionary passed on to the next. */
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed — i.e. the
 * user passed an option no component recognized. (The exit call following
 * the log is elided in this extraction.) */
589 void assert_avoptions(AVDictionary *m)
591 AVDictionaryEntry *t;
592 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
593 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is enabled, log the CPU time elapsed since the last
 * call under the printf-style label in fmt, then reset the reference time
 * (reset line elided). A NULL fmt just (re)arms the timer for the next
 * labelled call. */
603 static void update_benchmark(const char *fmt, ...)
605 if (do_benchmark_all) {
606 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
614 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: the stream that triggered the close
 * gets this_stream's flags, all other streams get others. Used e.g. after
 * a fatal mux error to shut the whole pipeline down. */
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded/copied packet to the muxer: propagate encoder extradata
 * to the muxing context, enforce -frames limits, collect video quality/PSNR
 * side data, run the bitstream-filter chain, sanity-fix DTS/PTS (including
 * non-monotonic DTS repair), update byte/packet counters, rescale the
 * stream index, and finally interleave-write. On write failure the whole
 * output is shut down.
 * NOTE(review): this extraction drops many lines (error paths, loop/brace
 * structure, exit_on_error handling) — treat the visible lines as an
 * ordered outline, not the complete control flow. */
629 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
631 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
632 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Copy encoder extradata to the muxer-side codec context if missing. */
635 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
636 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
637 if (ost->st->codec->extradata) {
638 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
639 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* VSYNC_DROP / negative async: discard timestamps entirely. */
643 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648 * Audio encoders may split the packets -- #frames in != #packets out.
649 * But there is no reordering, so we can limit the number of output packets
650 * by simply dropping them here.
651 * Counting encoded video frames needs to be done separately because of
652 * reordering, see do_video_out()
654 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
655 if (ost->frame_number >= ost->max_frames) {
/* Extract quality/picture-type/error side data attached by the encoder. */
661 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
663 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665 ost->quality = sd ? AV_RL32(sd) : -1;
666 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670 ost->error[i] = AV_RL64(sd + 8 + 8*i);
677 av_packet_split_side_data(pkt);
/* Run each bitstream filter in the chain on a shadow packet. */
680 AVPacket new_pkt = *pkt;
681 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
684 int a = av_bitstream_filter_filter(bsfc, avctx,
685 bsf_arg ? bsf_arg->value : NULL,
686 &new_pkt.data, &new_pkt.size,
687 pkt->data, pkt->size,
688 pkt->flags & AV_PKT_FLAG_KEY);
689 if(a == 0 && new_pkt.data != pkt->data) {
690 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
692 memcpy(t, new_pkt.data, new_pkt.size);
693 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
701 pkt->side_data = NULL;
702 pkt->side_data_elems = 0;
/* Wrap the filter-produced buffer so the packet owns (and frees) it. */
704 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
705 av_buffer_default_free, NULL, 0);
710 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
711 bsfc->filter->name, pkt->stream_index,
712 avctx->codec ? avctx->codec->name : "copy");
/* --- timestamp sanity fixes (only for formats that carry timestamps) --- */
722 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
723 if (pkt->dts != AV_NOPTS_VALUE &&
724 pkt->pts != AV_NOPTS_VALUE &&
725 pkt->dts > pkt->pts) {
726 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
728 ost->file_index, ost->st->index);
/* Median-of-three guess: replaces dts with the middle value of
 * {pts, dts, last_mux_dts + 1}. */
730 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
731 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
732 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
735 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
736 pkt->dts != AV_NOPTS_VALUE &&
737 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing DTS; non-strict allow equal. */
738 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
739 if (pkt->dts < max) {
740 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
741 av_log(s, loglevel, "Non-monotonous DTS in output stream "
742 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
743 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
745 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
748 av_log(s, loglevel, "changing to %"PRId64". This may result "
749 "in incorrect timestamps in the output file.\n",
751 if(pkt->pts >= pkt->dts)
752 pkt->pts = FFMAX(pkt->pts, max);
757 ost->last_mux_dts = pkt->dts;
759 ost->data_size += pkt->size;
760 ost->packets_written++;
762 pkt->stream_index = ost->index;
/* Optional -debug_ts trace of what goes into the muxer. */
765 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
766 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
767 av_get_media_type_string(ost->enc_ctx->codec_type),
768 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
769 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
774 ret = av_interleaved_write_frame(s, pkt);
776 print_error("av_interleaved_write_frame()", ret);
777 main_return_code = 1;
778 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark one output stream's encoder as finished and, when applicable
 * (guard elided), clamp the owning file's recording_time to the point this
 * stream reached, so other streams of the file stop at the same time. */
783 static void close_output_stream(OutputStream *ost)
785 OutputFile *of = output_files[ost->file_index];
787 ost->finished |= ENCODER_FINISHED;
789 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
790 of->recording_time = FFMIN(of->recording_time, end);
/* Enforce -t: if the stream's current position (sync_opts relative to its
 * first pts, in encoder time base) has reached the file's recording_time,
 * close the stream. Returns whether encoding may continue (return lines
 * elided in this extraction). */
794 static int check_recording_time(OutputStream *ost)
796 OutputFile *of = output_files[ost->file_index];
798 if (of->recording_time != INT64_MAX &&
799 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
800 AV_TIME_BASE_Q) >= 0) {
801 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet to write_frame().
 * sync_opts tracks the stream position in samples: it is advanced by
 * nb_samples and used as the frame pts when the input carries none (or
 * when audio sync is disabled). NOTE(review): got_packet handling and
 * error-exit lines are elided in this extraction. */
807 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
810 AVCodecContext *enc = ost->enc_ctx;
814 av_init_packet(&pkt);
/* Respect -t before doing any work. */
818 if (!check_recording_time(ost))
821 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
822 frame->pts = ost->sync_opts;
823 ost->sync_opts = frame->pts + frame->nb_samples;
824 ost->samples_encoded += frame->nb_samples;
825 ost->frames_encoded++;
827 av_assert0(pkt.size || !pkt.data);
828 update_benchmark(NULL);
/* Optional -debug_ts trace of what goes into the encoder. */
830 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
831 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
832 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
833 enc->time_base.num, enc->time_base.den);
836 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
837 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
840 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Packets leave the encoder in enc->time_base; mux in stream time base. */
843 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
846 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
847 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
848 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
849 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
852 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (nb == 2): one packet to draw, one to clear. The subtitle's
 * display-time window is folded into pts/duration on the packet.
 * NOTE(review): declarations, the `nb` default, pts initialization and
 * several guards are elided in this extraction. */
856 static void do_subtitle_out(AVFormatContext *s,
861 int subtitle_out_max_size = 1024 * 1024;
862 int subtitle_out_size, nb, i;
867 if (sub->pts == AV_NOPTS_VALUE) {
868 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in cleanup). */
877 subtitle_out = av_malloc(subtitle_out_max_size);
879 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
884 /* Note: DVB subtitle need one packet to draw them and one other
885 packet to clear them */
886 /* XXX: signal it in the codec context ? */
887 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
892 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
894 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
895 pts -= output_files[ost->file_index]->start_time;
896 for (i = 0; i < nb; i++) {
897 unsigned save_num_rects = sub->num_rects;
899 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
900 if (!check_recording_time(ost))
904 // start_display_time is required to be 0
905 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
906 sub->end_display_time -= sub->start_display_time;
907 sub->start_display_time = 0;
911 ost->frames_encoded++;
913 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
914 subtitle_out_max_size, sub);
/* The encoder may have zeroed num_rects (clear pass); restore it. */
916 sub->num_rects = save_num_rects;
917 if (subtitle_out_size < 0) {
918 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
922 av_init_packet(&pkt);
923 pkt.data = subtitle_out;
924 pkt.size = subtitle_out_size;
925 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
926 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
927 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
928 /* XXX: the pts correction is handled here. Maybe handling
929 it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond of display time. */
931 pkt.pts += 90 * sub->start_display_time;
933 pkt.pts += 90 * sub->end_display_time;
936 write_frame(s, &pkt, ost);
/* Core video output path: decide, according to the active vsync mode, how
 * many copies of the incoming filtered frame to emit (nb_frames), how many
 * of those are repeats of the previous frame (nb0_frames), then either
 * pass raw pictures straight to the muxer (AVFMT_RAWPICTURE) or run the
 * encoder, handling forced keyframes, two-pass stats, and -debug_ts traces.
 * Finally caches the frame in ost->last_frame for future duplication.
 * NOTE(review): this extraction elides a large number of lines (case
 * labels, braces, else-branches, declarations such as pkt/sync_ipts/
 * duration) — the visible lines are an ordered outline only; consult the
 * full source before reasoning about control flow. */
940 static void do_video_out(AVFormatContext *s,
942 AVFrame *next_picture,
945 int ret, format_video_sync;
947 AVCodecContext *enc = ost->enc_ctx;
948 AVCodecContext *mux_enc = ost->st->codec;
949 int nb_frames, nb0_frames, i;
950 double delta, delta0;
953 InputStream *ist = NULL;
954 AVFilterContext *filter = ost->filter->filter;
956 if (ost->source_index >= 0)
957 ist = input_streams[ost->source_index];
/* --- derive the expected frame duration (in encoder time-base units) --- */
959 if (filter->inputs[0]->frame_rate.num > 0 &&
960 filter->inputs[0]->frame_rate.den > 0)
961 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
963 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
964 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
966 if (!ost->filters_script &&
970 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
971 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path (elided guard): predict nb0 from recent history. */
976 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
977 ost->last_nb0_frames[1],
978 ost->last_nb0_frames[2]);
/* delta0: gap between where the frame wants to be and where we are;
 * delta: same gap measured at the end of the frame's duration. */
980 delta0 = sync_ipts - ost->sync_opts;
981 delta = delta0 + duration;
983 /* by default, we output a single frame */
/* --- resolve VSYNC_AUTO to a concrete sync mode for this muxer --- */
987 format_video_sync = video_sync_method;
988 if (format_video_sync == VSYNC_AUTO) {
989 if(!strcmp(s->oformat->name, "avi")) {
990 format_video_sync = VSYNC_VFR;
992 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
994 && format_video_sync == VSYNC_CFR
995 && input_files[ist->file_index]->ctx->nb_streams == 1
996 && input_files[ist->file_index]->input_ts_offset == 0) {
997 format_video_sync = VSYNC_VSCFR;
999 if (format_video_sync == VSYNC_CFR && copy_ts) {
1000 format_video_sync = VSYNC_VSCFR;
/* Frame arrived "in the past": clip its timestamp forward (guard elided). */
1006 format_video_sync != VSYNC_PASSTHROUGH &&
1007 format_video_sync != VSYNC_DROP) {
1008 double cor = FFMIN(-delta0, duration);
1009 if (delta0 < -0.6) {
1010 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1012 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
/* --- per-mode duplicate/drop decision --- */
1018 switch (format_video_sync) {
1020 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1021 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1024 ost->sync_opts = lrint(sync_ipts);
1027 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1028 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1030 } else if (delta < -1.1)
1032 else if (delta > 1.1) {
1033 nb_frames = lrintf(delta);
1035 nb0_frames = lrintf(delta0 - 0.6);
1041 else if (delta > 0.6)
1042 ost->sync_opts = lrint(sync_ipts);
1045 case VSYNC_PASSTHROUGH:
1046 ost->sync_opts = lrint(sync_ipts);
/* Cap duplication at the -frames limit and record history. */
1053 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1054 nb0_frames = FFMIN(nb0_frames, nb_frames);
1056 memmove(ost->last_nb0_frames + 1,
1057 ost->last_nb0_frames,
1058 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1059 ost->last_nb0_frames[0] = nb0_frames;
1061 if (nb0_frames == 0 && ost->last_droped) {
1063 av_log(NULL, AV_LOG_VERBOSE,
1064 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1065 ost->frame_number, ost->st->index, ost->last_frame->pts);
1067 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1068 if (nb_frames > dts_error_threshold * 30) {
1069 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1073 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1074 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1076 ost->last_droped = nb_frames == nb0_frames && next_picture;
1078 /* duplicates frame if needed */
1079 for (i = 0; i < nb_frames; i++) {
1080 AVFrame *in_picture;
1081 av_init_packet(&pkt);
/* First nb0_frames iterations re-emit the cached previous frame. */
1085 if (i < nb0_frames && ost->last_frame) {
1086 in_picture = ost->last_frame;
1088 in_picture = next_picture;
1093 in_picture->pts = ost->sync_opts;
1096 if (!check_recording_time(ost))
1098 if (ost->frame_number >= ost->max_frames)
/* --- raw-picture shortcut: no encode, pass the AVPicture through --- */
1102 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1103 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1104 /* raw pictures are written as AVPicture structure to
1105 avoid any copies. We support temporarily the older
1107 if (in_picture->interlaced_frame)
1108 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1110 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1111 pkt.data = (uint8_t *)in_picture;
1112 pkt.size = sizeof(AVPicture);
1113 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1114 pkt.flags |= AV_PKT_FLAG_KEY;
1116 write_frame(s, &pkt, ost);
/* --- normal encode path --- */
1118 int got_packet, forced_keyframe = 0;
1121 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1122 ost->top_field_first >= 0)
1123 in_picture->top_field_first = !!ost->top_field_first;
1125 if (in_picture->interlaced_frame) {
1126 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1127 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1129 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1131 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1133 in_picture->quality = enc->global_quality;
1134 in_picture->pict_type = 0;
/* --- forced keyframe decision: -force_key_frames list, expression,
 *     or "source" (copy input keyframe flags) --- */
1136 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1137 in_picture->pts * av_q2d(enc->time_base) : NAN;
1138 if (ost->forced_kf_index < ost->forced_kf_count &&
1139 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1140 ost->forced_kf_index++;
1141 forced_keyframe = 1;
1142 } else if (ost->forced_keyframes_pexpr) {
1144 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1145 res = av_expr_eval(ost->forced_keyframes_pexpr,
1146 ost->forced_keyframes_expr_const_values, NULL);
1147 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1148 ost->forced_keyframes_expr_const_values[FKF_N],
1149 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1150 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1151 ost->forced_keyframes_expr_const_values[FKF_T],
1152 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1155 forced_keyframe = 1;
1156 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1157 ost->forced_keyframes_expr_const_values[FKF_N];
1158 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1159 ost->forced_keyframes_expr_const_values[FKF_T];
1160 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1163 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1164 } else if ( ost->forced_keyframes
1165 && !strncmp(ost->forced_keyframes, "source", 6)
1166 && in_picture->key_frame==1) {
1167 forced_keyframe = 1;
1170 if (forced_keyframe) {
1171 in_picture->pict_type = AV_PICTURE_TYPE_I;
1172 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1175 update_benchmark(NULL);
1177 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1178 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1179 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1180 enc->time_base.num, enc->time_base.den);
1183 ost->frames_encoded++;
1185 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1186 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1188 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1194 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1195 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1196 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1197 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1200 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1201 pkt.pts = ost->sync_opts;
1203 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1206 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1207 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1208 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1209 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1212 frame_size = pkt.size;
1213 write_frame(s, &pkt, ost);
1215 /* if two pass, output log */
1216 if (ost->logfile && enc->stats_out) {
1217 fprintf(ost->logfile, "%s", enc->stats_out);
1223 * For video, number of frames in == number of packets out.
1224 * But there may be reordering, so we can't throw away frames on encoder
1225 * flush, we need to limit them here, before they go into encoder.
1227 ost->frame_number++;
1229 if (vstats_filename && frame_size)
1230 do_video_stats(ost, frame_size);
/* Cache this frame so future iterations can duplicate it (nb0_frames). */
1233 if (!ost->last_frame)
1234 ost->last_frame = av_frame_alloc();
1235 av_frame_unref(ost->last_frame);
1236 if (next_picture && ost->last_frame)
1237 av_frame_ref(ost->last_frame, next_picture);
1239 av_frame_free(&ost->last_frame);
1242 static double psnr(double d)
1244 return -10.0 * log(d) / log(10.0);
/* Append one line of per-frame statistics (quality, optional PSNR, frame
 * size, timestamps, bitrates, picture type) to the -vstats file, lazily
 * opening it on first use. NOTE(review): the fopen error path, enc
 * assignment, and guards are elided in this extraction. */
1247 static void do_video_stats(OutputStream *ost, int frame_size)
1249 AVCodecContext *enc;
1251 double ti1, bitrate, avg_bitrate;
1253 /* this is executed just the first time do_video_stats is called */
1255 vstats_file = fopen(vstats_filename, "w");
1263 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1264 frame_number = ost->st->nb_frames;
1265 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1266 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR from the encoder's squared-error side data, normalized per pixel. */
1268 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1269 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1271 fprintf(vstats_file,"f_size= %6d ", frame_size);
1272 /* compute pts value */
1273 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1277 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1278 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1279 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1280 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1281 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Fully finish one output stream and — when applicable (guard elided,
 * presumably of->shortest) — every other stream of the same output file,
 * marking both encoder and muxer sides done. */
1285 static void finish_output_stream(OutputStream *ost)
1287 OutputFile *of = output_files[ost->file_index];
1290 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1293 for (i = 0; i < of->ctx->nb_streams; i++)
1294 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1299 * Get and encode new output from any of the filtergraphs, without causing
1302 * @return 0 for success, <0 for severe errors
/* Pull every frame currently buffered in the filtergraph sinks and hand it to
 * the appropriate encoder (do_video_out / do_audio_out).  With flush set, an
 * EOF from a video sink still triggers a final do_video_out(NULL) call so the
 * encoder can be drained.  Returns 0 on success, AVERROR(ENOMEM) on
 * allocation failure; sink read errors are only logged. */
1304 static int reap_filters(int flush)
1306 AVFrame *filtered_frame = NULL;
1309 /* Reap all buffers present in the buffer sinks */
1310 for (i = 0; i < nb_output_streams; i++) {
1311 OutputStream *ost = output_streams[i];
1312 OutputFile *of = output_files[ost->file_index];
1313 AVFilterContext *filter;
1314 AVCodecContext *enc = ost->enc_ctx;
1319 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive sink output */
1321 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1322 return AVERROR(ENOMEM);
1324 filtered_frame = ost->filtered_frame;
1327 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered, do not run the graph */
1328 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1329 AV_BUFFERSINK_FLAG_NO_REQUEST);
1331 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1332 av_log(NULL, AV_LOG_WARNING,
1333 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1334 } else if (flush && ret == AVERROR_EOF) {
1335 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1336 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1340 if (ost->finished) {
1341 av_frame_unref(filtered_frame);
1344 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1345 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1346 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra precision */
1347 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1349 tb.den <<= extra_bits;
1351 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1352 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1353 float_pts /= 1 << extra_bits;
1354 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1355 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1357 filtered_frame->pts =
1358 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1359 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1361 //if (ost->source_index >= 0)
1362 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch on the media type of the sink's input pad */
1364 switch (filter->inputs[0]->type) {
1365 case AVMEDIA_TYPE_VIDEO:
1366 if (!ost->frame_aspect_ratio.num)
1367 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1370 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1371 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1373 enc->time_base.num, enc->time_base.den);
1376 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1378 case AVMEDIA_TYPE_AUDIO:
/* a channel-count change is fatal unless the encoder accepts it */
1379 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1380 enc->channels != av_frame_get_channels(filtered_frame)) {
1381 av_log(NULL, AV_LOG_ERROR,
1382 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1385 do_audio_out(of->ctx, ost, filtered_frame);
1388 // TODO support subtitle filters
1392 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and
 * encode/mux statistics for every input and output file. */
1399 static void print_final_stats(int64_t total_size)
1401 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1402 uint64_t subtitle_size = 0;
1403 uint64_t data_size = 0;
1404 float percent = -1.0;
/* accumulate bytes written per media type across all output streams */
1408 for (i = 0; i < nb_output_streams; i++) {
1409 OutputStream *ost = output_streams[i];
1410 switch (ost->enc_ctx->codec_type) {
1411 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1412 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1413 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1414 default: other_size += ost->data_size; break;
1416 extra_size += ost->enc_ctx->extradata_size;
1417 data_size += ost->data_size;
/* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with the legacy
 * CODEC_FLAG_PASS2 name — presumably this should be AV_CODEC_FLAG_PASS2;
 * confirm against the libavcodec version this tree builds with. */
1418 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1419 != AV_CODEC_FLAG_PASS1)
/* overhead = container bytes beyond the raw coded payload */
1423 if (data_size && total_size>0 && total_size >= data_size)
1424 percent = 100.0 * (total_size - data_size) / data_size;
1426 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1427 video_size / 1024.0,
1428 audio_size / 1024.0,
1429 subtitle_size / 1024.0,
1430 other_size / 1024.0,
1431 extra_size / 1024.0);
1433 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1435 av_log(NULL, AV_LOG_INFO, "unknown");
1436 av_log(NULL, AV_LOG_INFO, "\n");
1438 /* print verbose per-stream stats */
1439 for (i = 0; i < nb_input_files; i++) {
1440 InputFile *f = input_files[i];
1441 uint64_t total_packets = 0, total_size = 0;
1443 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1444 i, f->ctx->filename);
1446 for (j = 0; j < f->nb_streams; j++) {
1447 InputStream *ist = input_streams[f->ist_index + j];
1448 enum AVMediaType type = ist->dec_ctx->codec_type;
1450 total_size += ist->data_size;
1451 total_packets += ist->nb_packets;
1453 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1454 i, j, media_type_string(type));
1455 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1456 ist->nb_packets, ist->data_size);
1458 if (ist->decoding_needed) {
1459 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1460 ist->frames_decoded);
1461 if (type == AVMEDIA_TYPE_AUDIO)
1462 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1463 av_log(NULL, AV_LOG_VERBOSE, "; ");
1466 av_log(NULL, AV_LOG_VERBOSE, "\n");
1469 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1470 total_packets, total_size);
/* symmetric summary for every output file and its streams */
1473 for (i = 0; i < nb_output_files; i++) {
1474 OutputFile *of = output_files[i];
1475 uint64_t total_packets = 0, total_size = 0;
1477 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1478 i, of->ctx->filename);
1480 for (j = 0; j < of->ctx->nb_streams; j++) {
1481 OutputStream *ost = output_streams[of->ost_index + j];
1482 enum AVMediaType type = ost->enc_ctx->codec_type;
1484 total_size += ost->data_size;
1485 total_packets += ost->packets_written;
1487 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1488 i, j, media_type_string(type));
1489 if (ost->encoding_needed) {
1490 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1491 ost->frames_encoded);
1492 if (type == AVMEDIA_TYPE_AUDIO)
1493 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1494 av_log(NULL, AV_LOG_VERBOSE, "; ");
1497 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1498 ost->packets_written, ost->data_size);
1500 av_log(NULL, AV_LOG_VERBOSE, "\n");
1503 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1504 total_packets, total_size);
/* warn loudly when absolutely nothing was produced */
1506 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1507 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1509 av_log(NULL, AV_LOG_WARNING, "\n");
1511 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (and final) status line — frame count, fps, quantizer,
 * optional QP histogram and PSNR, size, time, bitrate, dup/drop counts — to
 * stderr/log, and a machine-readable key=value block to progress_avio when
 * -progress is in use.  Rate-limited to one update per 500ms unless this is
 * the last report.  Uses function-local static state (last_time,
 * qp_histogram), so it is not reentrant. */
1516 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1519 AVBPrint buf_script;
1521 AVFormatContext *oc;
1523 AVCodecContext *enc;
1524 int frame_number, vid, i;
1526 int64_t pts = INT64_MIN + 1;
1527 static int64_t last_time = -1;
1528 static int qp_histogram[52];
1529 int hours, mins, secs, us;
/* nothing to do when no one is watching */
1531 if (!print_stats && !is_last_report && !progress_avio)
1534 if (!is_last_report) {
1535 if (last_time == -1) {
1536 last_time = cur_time;
/* throttle intermediate reports to every 0.5 seconds */
1539 if ((cur_time - last_time) < 500000)
1541 last_time = cur_time;
1545 oc = output_files[0]->ctx;
1547 total_size = avio_size(oc->pb);
1548 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1549 total_size = avio_tell(oc->pb);
1553 av_bprint_init(&buf_script, 0, 1);
1554 for (i = 0; i < nb_output_streams; i++) {
1556 ost = output_streams[i];
1558 if (!ost->stream_copy)
1559 q = ost->quality / (float) FF_QP2LAMBDA;
/* a video stream after the first only contributes its quantizer */
1561 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1563 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1564 ost->file_index, ost->index, q);
/* first video stream drives the frame/fps display */
1566 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1567 float fps, t = (cur_time-timer_start) / 1000000.0;
1569 frame_number = ost->frame_number;
1570 fps = t > 1 ? frame_number / t : 0;
1571 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1572 frame_number, fps < 9.95, fps, q);
1573 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1574 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1575 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1576 ost->file_index, ost->index, q);
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* log2-compressed QP histogram, one hex digit per bucket */
1582 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1584 for (j = 0; j < 32; j++)
1585 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1588 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1590 double error, error_sum = 0;
1591 double scale, scale_sum = 0;
1593 char type[3] = { 'Y','U','V' };
1594 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1595 for (j = 0; j < 3; j++) {
/* final report uses the accumulated error, periodic uses per-frame error */
1596 if (is_last_report) {
1597 error = enc->error[j];
1598 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1600 error = ost->error[j];
1601 scale = enc->width * enc->height * 255.0 * 255.0;
1607 p = psnr(error / scale);
1608 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1609 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1610 ost->file_index, ost->index, type[j] | 32, p);
1612 p = psnr(error_sum / scale_sum);
1613 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1614 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1615 ost->file_index, ost->index, p);
1619 /* compute min output value */
1620 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1621 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1622 ost->st->time_base, AV_TIME_BASE_Q));
1624 nb_frames_drop += ost->last_droped;
/* split pts into h:m:s.us for display */
1627 secs = FFABS(pts) / AV_TIME_BASE;
1628 us = FFABS(pts) % AV_TIME_BASE;
1634 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1636 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1638 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1639 "size=%8.0fkB time=", total_size / 1024.0);
1641 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1642 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1643 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1644 (100 * us) / AV_TIME_BASE);
1647 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1648 av_bprintf(&buf_script, "bitrate=N/A\n");
1650 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1651 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1654 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1655 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1656 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1657 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1658 hours, mins, secs, us);
1660 if (nb_frames_dup || nb_frames_drop)
1661 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1662 nb_frames_dup, nb_frames_drop);
1663 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1664 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* human-readable line: '\r' keeps updating in place, '\n' for the last one */
1666 if (print_stats || is_last_report) {
1667 const char end = is_last_report ? '\n' : '\r';
1668 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1669 fprintf(stderr, "%s %c", buf, end);
1671 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* machine-readable -progress output */
1676 if (progress_avio) {
1677 av_bprintf(&buf_script, "progress=%s\n",
1678 is_last_report ? "end" : "continue");
1679 avio_write(progress_avio, buf_script.str,
1680 FFMIN(buf_script.len, buf_script.size - 1));
1681 avio_flush(progress_avio);
1682 av_bprint_finalize(&buf_script, NULL);
1683 if (is_last_report) {
1684 avio_closep(&progress_avio);
1689 print_final_stats(total_size);
/* Drain every active encoder at end of stream: repeatedly call the encode
 * function with a NULL frame until it stops producing packets, writing each
 * resulting packet to its muxer.  Streams that are copy-only, raw-picture
 * video, or sample-granular audio (frame_size <= 1) are skipped. */
1692 static void flush_encoders(void)
1696 for (i = 0; i < nb_output_streams; i++) {
1697 OutputStream *ost = output_streams[i];
1698 AVCodecContext *enc = ost->enc_ctx;
1699 AVFormatContext *os = output_files[ost->file_index]->ctx;
1700 int stop_encoding = 0;
1702 if (!ost->encoding_needed)
/* these encoder types buffer nothing, so there is nothing to flush */
1705 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1707 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1711 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1714 switch (enc->codec_type) {
1715 case AVMEDIA_TYPE_AUDIO:
1716 encode = avcodec_encode_audio2;
1719 case AVMEDIA_TYPE_VIDEO:
1720 encode = avcodec_encode_video2;
1731 av_init_packet(&pkt);
1735 update_benchmark(NULL);
/* NULL frame signals the encoder to emit its delayed packets */
1736 ret = encode(enc, &pkt, NULL, &got_packet);
1737 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1739 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* keep the two-pass stats log up to date while flushing */
1744 if (ost->logfile && enc->stats_out) {
1745 fprintf(ost->logfile, "%s", enc->stats_out);
1751 if (ost->finished & MUXER_FINISHED) {
1752 av_free_packet(&pkt);
1755 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1756 pkt_size = pkt.size;
1757 write_frame(os, &pkt, ost);
1758 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1759 do_video_stats(ost, pkt_size);
1770 * Check whether a packet from ist should be written into ost at this time
/* Decide whether a packet from ist may be written to ost right now.
 * Rejects packets whose stream is not the source of ost, and packets
 * arriving before the output file's start time. */
1772 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1774 OutputFile *of = output_files[ost->file_index];
/* global index of ist among all input streams */
1775 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1777 if (ost->source_index != ist_index)
1783 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Stream-copy path: remux one input packet into ost without re-encoding.
 * Rescales pts/dts/duration into the output stream timebase (offset by the
 * output start time), enforces recording-time limits, optionally runs the
 * bitstream through av_parser_change for codecs that need it, and handles
 * the raw-picture special case before handing the packet to write_frame. */
1789 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1791 OutputFile *of = output_files[ost->file_index];
1792 InputFile *f = input_files [ist->file_index];
1793 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1794 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1799 av_init_packet(&opkt);
/* by default the first emitted packet must be a keyframe */
1801 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1802 !ost->copy_initial_nonkeyframes)
/* drop packets that precede the requested output start (-ss on output) */
1805 if (pkt->pts == AV_NOPTS_VALUE) {
1806 if (!ost->frame_number && ist->pts < start_time &&
1807 !ost->copy_prior_start)
1810 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1811 !ost->copy_prior_start)
/* stop this stream once the output recording time is reached */
1815 if (of->recording_time != INT64_MAX &&
1816 ist->pts >= of->recording_time + start_time) {
1817 close_output_stream(ost);
/* also honor the per-input-file recording limit (-t on input) */
1821 if (f->recording_time != INT64_MAX) {
1822 start_time = f->ctx->start_time;
1823 if (f->start_time != AV_NOPTS_VALUE)
1824 start_time += f->start_time;
1825 if (ist->pts >= f->recording_time + start_time) {
1826 close_output_stream(ost);
1831 /* force the input stream PTS */
1832 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1835 if (pkt->pts != AV_NOPTS_VALUE)
1836 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1838 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the input stream's running dts when the packet has none */
1840 if (pkt->dts == AV_NOPTS_VALUE)
1841 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1843 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1844 opkt.dts -= ost_tb_start_time;
/* audio: use sample-accurate rescaling so rounding errors do not accumulate */
1846 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1847 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1849 duration = ist->dec_ctx->frame_size;
1850 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1851 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1852 ost->st->time_base) - ost_tb_start_time;
1855 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1856 opkt.flags = pkt->flags;
1857 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1858 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1859 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1860 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1861 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1863 int ret = av_parser_change(ost->parser, ost->st->codec,
1864 &opkt.data, &opkt.size,
1865 pkt->data, pkt->size,
1866 pkt->flags & AV_PKT_FLAG_KEY);
1868 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the (possibly newly allocated) payload in a refcounted buffer */
1873 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1878 opkt.data = pkt->data;
1879 opkt.size = pkt->size;
1881 av_copy_packet_side_data(&opkt, pkt);
1883 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1884 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1885 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1886 /* store AVPicture in AVPacket, as expected by the output format */
1887 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1889 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1893 opkt.data = (uint8_t *)&pict;
1894 opkt.size = sizeof(AVPicture);
1895 opkt.flags |= AV_PKT_FLAG_KEY;
1898 write_frame(of->ctx, &opkt, ost);
/* If the decoder context has no channel layout, pick the default layout for
 * its channel count (when within guess_layout_max) and log the guess. */
1901 int guess_input_channel_layout(InputStream *ist)
1903 AVCodecContext *dec = ist->dec_ctx;
1905 if (!dec->channel_layout) {
1906 char layout_name[256];
/* too many channels to guess a meaningful default layout */
1908 if (dec->channels > ist->guess_layout_max)
1910 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1911 if (!dec->channel_layout)
1913 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1914 dec->channels, dec->channel_layout);
1915 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1916 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet: run avcodec_decode_audio4, update per-stream
 * timestamps and statistics, reconfigure filtergraphs when the sample
 * rate/format/channel layout changed mid-stream, pick the best available
 * pts source for the decoded frame, and push it into every attached
 * filtergraph input.  Returns the decoder's return value, or a negative
 * error from frame ref / buffersrc injection. */
1921 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1923 AVFrame *decoded_frame, *f;
1924 AVCodecContext *avctx = ist->dec_ctx;
1925 int i, ret, err = 0, resample_changed;
1926 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1928 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1929 return AVERROR(ENOMEM);
1930 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1931 return AVERROR(ENOMEM);
1932 decoded_frame = ist->decoded_frame;
1934 update_benchmark(NULL);
1935 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1936 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1938 if (ret >= 0 && avctx->sample_rate <= 0) {
1939 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1940 ret = AVERROR_INVALIDDATA;
/* count successful and failed decode calls for the final report */
1943 if (*got_output || ret<0)
1944 decode_error_stat[ret<0] ++;
1946 if (ret < 0 && exit_on_error)
1949 if (!*got_output || ret < 0)
1952 ist->samples_decoded += decoded_frame->nb_samples;
1953 ist->frames_decoded++;
1956 /* increment next_dts to use for the case where the input stream does not
1957 have timestamps or there are multiple frames in the packet */
1958 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1960 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream parameter changes that require filter reconfiguration */
1964 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1965 ist->resample_channels != avctx->channels ||
1966 ist->resample_channel_layout != decoded_frame->channel_layout ||
1967 ist->resample_sample_rate != decoded_frame->sample_rate;
1968 if (resample_changed) {
1969 char layout1[64], layout2[64];
1971 if (!guess_input_channel_layout(ist)) {
1972 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1973 "layout for Input Stream #%d.%d\n", ist->file_index,
1977 decoded_frame->channel_layout = avctx->channel_layout;
1979 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1980 ist->resample_channel_layout);
1981 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1982 decoded_frame->channel_layout);
1984 av_log(NULL, AV_LOG_INFO,
1985 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1986 ist->file_index, ist->st->index,
1987 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1988 ist->resample_channels, layout1,
1989 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1990 avctx->channels, layout2);
/* remember the new parameters and rebuild affected filtergraphs */
1992 ist->resample_sample_fmt = decoded_frame->format;
1993 ist->resample_sample_rate = decoded_frame->sample_rate;
1994 ist->resample_channel_layout = decoded_frame->channel_layout;
1995 ist->resample_channels = avctx->channels;
1997 for (i = 0; i < nb_filtergraphs; i++)
1998 if (ist_in_filtergraph(filtergraphs[i], ist)) {
1999 FilterGraph *fg = filtergraphs[i];
2000 if (configure_filtergraph(fg) < 0) {
2001 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2007 /* if the decoder provides a pts, use it instead of the last packet pts.
2008 the decoder could be delaying output by a packet or more. */
2009 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2010 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2011 decoded_frame_tb = avctx->time_base;
2012 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2013 decoded_frame->pts = decoded_frame->pkt_pts;
2014 decoded_frame_tb = ist->st->time_base;
2015 } else if (pkt->pts != AV_NOPTS_VALUE) {
2016 decoded_frame->pts = pkt->pts;
2017 decoded_frame_tb = ist->st->time_base;
2019 decoded_frame->pts = ist->dts;
2020 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so a second frame from it does not reuse it */
2022 pkt->pts = AV_NOPTS_VALUE;
2023 if (decoded_frame->pts != AV_NOPTS_VALUE)
2024 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2025 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2026 (AVRational){1, avctx->sample_rate});
2027 ist->nb_samples = decoded_frame->nb_samples;
/* fan the frame out to all filter inputs; last one takes the original ref */
2028 for (i = 0; i < ist->nb_filters; i++) {
2029 if (i < ist->nb_filters - 1) {
2030 f = ist->filter_frame;
2031 err = av_frame_ref(f, decoded_frame);
2036 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2037 AV_BUFFERSRC_FLAG_PUSH);
2038 if (err == AVERROR_EOF)
2039 err = 0; /* ignore */
2043 decoded_frame->pts = AV_NOPTS_VALUE;
2045 av_frame_unref(ist->filter_frame);
2046 av_frame_unref(decoded_frame);
2047 return err < 0 ? err : ret;
/* Decode one video packet: run avcodec_decode_video2, propagate has_b_frames
 * workarounds, retrieve hwaccel frames, derive the frame pts from the best
 * effort timestamp, reconfigure filtergraphs on size/pixel-format changes,
 * and push the frame into every attached filtergraph input. */
2050 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2052 AVFrame *decoded_frame, *f;
2053 int i, ret = 0, err = 0, resample_changed;
2054 int64_t best_effort_timestamp;
2055 AVRational *frame_sample_aspect;
2057 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2058 return AVERROR(ENOMEM);
2059 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2060 return AVERROR(ENOMEM);
/* feed the stream's running dts to the decoder via the packet */
2061 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2064 update_benchmark(NULL);
2065 ret = avcodec_decode_video2(ist->dec_ctx,
2066 decoded_frame, got_output, pkt);
2067 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2069 // The following line may be required in some cases where there is no parser
2070 // or the parser does not set has_b_frames correctly
2071 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2072 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2073 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2075 av_log(ist->dec_ctx, AV_LOG_WARNING,
2076 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2077 "If you want to help, upload a sample "
2078 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2079 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2080 ist->dec_ctx->has_b_frames,
2081 ist->st->codec->has_b_frames);
2084 if (*got_output || ret<0)
2085 decode_error_stat[ret<0] ++;
2087 if (ret < 0 && exit_on_error)
/* sanity-check that the decoded frame matches the context's geometry */
2090 if (*got_output && ret >= 0) {
2091 if (ist->dec_ctx->width != decoded_frame->width ||
2092 ist->dec_ctx->height != decoded_frame->height ||
2093 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2094 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2095 decoded_frame->width,
2096 decoded_frame->height,
2097 decoded_frame->format,
2098 ist->dec_ctx->width,
2099 ist->dec_ctx->height,
2100 ist->dec_ctx->pix_fmt);
2104 if (!*got_output || ret < 0)
2107 if(ist->top_field_first>=0)
2108 decoded_frame->top_field_first = ist->top_field_first;
2110 ist->frames_decoded++;
/* download the frame from hardware memory when a hwaccel produced it */
2112 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2113 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2117 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2119 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2120 if(best_effort_timestamp != AV_NOPTS_VALUE)
2121 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2124 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2125 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2126 ist->st->index, av_ts2str(decoded_frame->pts),
2127 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2128 best_effort_timestamp,
2129 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2130 decoded_frame->key_frame, decoded_frame->pict_type,
2131 ist->st->time_base.num, ist->st->time_base.den);
2136 if (ist->st->sample_aspect_ratio.num)
2137 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size or pixel-format changes */
2139 resample_changed = ist->resample_width != decoded_frame->width ||
2140 ist->resample_height != decoded_frame->height ||
2141 ist->resample_pix_fmt != decoded_frame->format;
2142 if (resample_changed) {
2143 av_log(NULL, AV_LOG_INFO,
2144 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2145 ist->file_index, ist->st->index,
2146 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2147 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2149 ist->resample_width = decoded_frame->width;
2150 ist->resample_height = decoded_frame->height;
2151 ist->resample_pix_fmt = decoded_frame->format;
2153 for (i = 0; i < nb_filtergraphs; i++) {
2154 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2155 configure_filtergraph(filtergraphs[i]) < 0) {
2156 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2162 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* fan the frame out to all filter inputs; last one takes the original ref */
2163 for (i = 0; i < ist->nb_filters; i++) {
2164 if (!frame_sample_aspect->num)
2165 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2167 if (i < ist->nb_filters - 1) {
2168 f = ist->filter_frame;
2169 err = av_frame_ref(f, decoded_frame);
2174 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2175 if (ret == AVERROR_EOF) {
2176 ret = 0; /* ignore */
2177 } else if (ret < 0) {
2178 av_log(NULL, AV_LOG_FATAL,
2179 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2185 av_frame_unref(ist->filter_frame);
2186 av_frame_unref(decoded_frame);
2187 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix up the previous subtitle's
 * display duration (-fix_sub_duration works one subtitle behind), feed the
 * result to sub2video, and encode it into every matching subtitle output
 * stream. */
2190 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2192 AVSubtitle subtitle;
2193 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2194 &subtitle, got_output, pkt);
2196 if (*got_output || ret<0)
2197 decode_error_stat[ret<0] ++;
2199 if (ret < 0 && exit_on_error)
2202 if (ret < 0 || !*got_output) {
2204 sub2video_flush(ist);
/* shorten the previous subtitle so it does not overlap the new one */
2208 if (ist->fix_sub_duration) {
2210 if (ist->prev_sub.got_output) {
2211 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2212 1000, AV_TIME_BASE);
2213 if (end < ist->prev_sub.subtitle.end_display_time) {
2214 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2215 "Subtitle duration reduced from %d to %d%s\n",
2216 ist->prev_sub.subtitle.end_display_time, end,
2217 end <= 0 ? ", dropping it" : "");
2218 ist->prev_sub.subtitle.end_display_time = end;
/* emit the previous subtitle now and hold the current one back */
2221 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2222 FFSWAP(int, ret, ist->prev_sub.ret);
2223 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2231 sub2video_update(ist, &subtitle);
2233 if (!subtitle.num_rects)
2236 ist->frames_decoded++;
2238 for (i = 0; i < nb_output_streams; i++) {
2239 OutputStream *ost = output_streams[i];
2241 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2242 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2245 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2249 avsubtitle_free(&subtitle);
/* Signal EOF (a NULL frame) to every filtergraph input fed by this
 * input stream. */
2253 static int send_filter_eof(InputStream *ist)
2256 for (i = 0; i < ist->nb_filters; i++) {
2257 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2264 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one demuxed packet from ist (pkt == NULL means EOF / flush the
 * decoders; no_eof suppresses sending EOF to the filters, used when looping).
 * For decoded streams it loops the decoder over the packet, maintaining the
 * pts/dts prediction (next_pts/next_dts); for stream-copy it advances the
 * predicted timestamps from packet durations/framerate and forwards the
 * packet to every eligible output via do_streamcopy. */
2265 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* initialize the stream clock from the first timestamped packet */
2271 if (!ist->saw_first_ts) {
2272 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2274 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2275 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2276 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2278 ist->saw_first_ts = 1;
2281 if (ist->next_dts == AV_NOPTS_VALUE)
2282 ist->next_dts = ist->dts;
2283 if (ist->next_pts == AV_NOPTS_VALUE)
2284 ist->next_pts = ist->pts;
2288 av_init_packet(&avpkt);
/* sync the stream clock to the packet's dts (in AV_TIME_BASE units) */
2296 if (pkt->dts != AV_NOPTS_VALUE) {
2297 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2298 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2299 ist->next_pts = ist->pts = ist->dts;
2302 // while we have more to decode or while the decoder did output something on EOF
2303 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2307 ist->pts = ist->next_pts;
2308 ist->dts = ist->next_dts;
2310 if (avpkt.size && avpkt.size != pkt->size &&
2311 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2312 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2313 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2314 ist->showed_multi_packet_warning = 1;
2317 switch (ist->dec_ctx->codec_type) {
2318 case AVMEDIA_TYPE_AUDIO:
2319 ret = decode_audio (ist, &avpkt, &got_output);
2321 case AVMEDIA_TYPE_VIDEO:
2322 ret = decode_video (ist, &avpkt, &got_output);
/* derive the frame duration: packet duration, else codec framerate */
2323 if (avpkt.duration) {
2324 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2325 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2326 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2327 duration = ((int64_t)AV_TIME_BASE *
2328 ist->dec_ctx->framerate.den * ticks) /
2329 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2333 if(ist->dts != AV_NOPTS_VALUE && duration) {
2334 ist->next_dts += duration;
2336 ist->next_dts = AV_NOPTS_VALUE;
2339 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2341 case AVMEDIA_TYPE_SUBTITLE:
2342 ret = transcode_subtitles(ist, &avpkt, &got_output);
2349 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2350 ist->file_index, ist->st->index, av_err2str(ret));
/* only the first decode call for a packet may consume its pts */
2357 avpkt.pts= AV_NOPTS_VALUE;
2359 // touch data and size only if not EOF
2361 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2369 if (got_output && !pkt)
2373 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2374 /* except when looping we need to flush but not to send an EOF */
2375 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2376 int ret = send_filter_eof(ist);
2378 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2383 /* handle stream copy */
2384 if (!ist->decoding_needed) {
2385 ist->dts = ist->next_dts;
2386 switch (ist->dec_ctx->codec_type) {
2387 case AVMEDIA_TYPE_AUDIO:
2388 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2389 ist->dec_ctx->sample_rate;
2391 case AVMEDIA_TYPE_VIDEO:
2392 if (ist->framerate.num) {
2393 // TODO: Remove work-around for c99-to-c89 issue 7
2394 AVRational time_base_q = AV_TIME_BASE_Q;
2395 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2396 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2397 } else if (pkt->duration) {
2398 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2399 } else if(ist->dec_ctx->framerate.num != 0) {
2400 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2401 ist->next_dts += ((int64_t)AV_TIME_BASE *
2402 ist->dec_ctx->framerate.den * ticks) /
2403 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2407 ist->pts = ist->dts;
2408 ist->next_pts = ist->next_dts;
/* forward the packet to every stream-copy output fed by this input */
2410 for (i = 0; pkt && i < nb_output_streams; i++) {
2411 OutputStream *ost = output_streams[i];
2413 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2416 do_streamcopy(ist, ost, pkt);
// Emit an SDP session description covering every output muxed with the
// "rtp" format: to stdout by default, or to -sdp_file when given.
// NOTE(review): elided listing — declarations of i/j/sdp[] and the
// failure path (e.g. the av_malloc_array NULL check) are not visible here.
2422 static void print_sdp(void)
2427 AVIOContext *sdp_pb;
2428 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; j counts how many were found.
2432 for (i = 0, j = 0; i < nb_output_files; i++) {
2433 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2434 avc[j] = output_files[i]->ctx;
2439 av_sdp_create(avc, j, sdp, sizeof(sdp));
// No -sdp_file given: print to stdout.
2441 if (!sdp_filename) {
2442 printf("SDP:\n%s\n", sdp);
// Otherwise write the SDP to the requested file via avio.
2445 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2446 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2448 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2449 avio_closep(&sdp_pb);
// sdp_filename is owned here; release it once the file is written.
2450 av_freep(&sdp_filename);
// Linear scan of the compile-time hwaccels[] table for an entry whose
// pixel format matches pix_fmt; returns a pointer into the table.
// NOTE(review): elided listing — the declaration of 'i' and the
// fall-through return (presumably NULL when no entry matches) are not
// visible here; confirm against the full source.
2457 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2460 for (i = 0; hwaccels[i].name; i++)
2461 if (hwaccels[i].pix_fmt == pix_fmt)
2462 return &hwaccels[i];
// AVCodecContext.get_format callback: walk the decoder's proposed pixel
// format list and pick the first hwaccel format whose HWAccel entry can
// be initialized, recording it on the InputStream. Non-hwaccel formats
// and mismatched hwaccel ids are skipped.
// NOTE(review): elided listing — the 'continue' statements after the
// skip conditions, the 'ret' declaration, and the final return (the
// chosen *p, presumably) are not visible here.
2466 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2468 InputStream *ist = s->opaque;
2469 const enum AVPixelFormat *p;
// pix_fmts is terminated by -1 (AV_PIX_FMT_NONE).
2472 for (p = pix_fmts; *p != -1; p++) {
2473 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2474 const HWAccel *hwaccel;
// Only hardware-accelerated formats are of interest here.
2476 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2479 hwaccel = get_hwaccel(*p);
// Skip when another hwaccel is already active, or the user asked
// for a specific hwaccel and this entry is a different one.
2481 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2482 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2485 ret = hwaccel->init(s);
// Init failed: fatal only if this exact hwaccel was requested.
2487 if (ist->hwaccel_id == hwaccel->id) {
2488 av_log(NULL, AV_LOG_FATAL,
2489 "%s hwaccel requested for input stream #%d:%d, "
2490 "but cannot be initialized.\n", hwaccel->name,
2491 ist->file_index, ist->st->index);
2492 return AV_PIX_FMT_NONE;
// Success: remember the active hwaccel and its pixel format.
2496 ist->active_hwaccel_id = hwaccel->id;
2497 ist->hwaccel_pix_fmt = *p;
2504 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2506 InputStream *ist = s->opaque;
2508 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2509 return ist->hwaccel_get_buffer(s, frame, flags);
2511 return avcodec_default_get_buffer2(s, frame, flags);
// Open the decoder for input stream ist_index (when decoding is needed)
// and reset its timestamp bookkeeping. On failure an English message is
// written into 'error' (error_len bytes) and a negative AVERROR returned.
// NOTE(review): elided listing — the NULL check on 'codec', the return
// after the avcodec_open2 error message, the 'ret' declaration and the
// final 'return 0' are not visible here.
2514 static int init_input_stream(int ist_index, char *error, int error_len)
2517 InputStream *ist = input_streams[ist_index];
2519 if (ist->decoding_needed) {
2520 AVCodec *codec = ist->dec;
// No decoder was found for this codec id: report and bail out.
2522 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2523 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2524 return AVERROR(EINVAL);
// Wire up the per-stream callbacks before opening the decoder.
2527 ist->dec_ctx->opaque = ist;
2528 ist->dec_ctx->get_format = get_format;
2529 ist->dec_ctx->get_buffer2 = get_buffer;
2530 ist->dec_ctx->thread_safe_callbacks = 1;
2532 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
// DVB subtitles going to an output need end-display-time computation.
2533 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2534 (ist->decoding_needed & DECODING_FOR_OST)) {
2535 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2536 if (ist->decoding_needed & DECODING_FOR_FILTER)
2537 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
// Default to automatic thread-count selection unless the user set one.
2540 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2541 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2542 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2543 if (ret == AVERROR_EXPERIMENTAL)
2544 abort_codec_experimental(codec, 0);
2546 snprintf(error, error_len,
2547 "Error while opening decoder for input stream "
2549 ist->file_index, ist->st->index, av_err2str(ret));
// Any options left in decoder_opts were not consumed by the decoder.
2552 assert_avoptions(ist->decoder_opts);
// Timestamps are unknown until the first packet is processed.
2555 ist->next_pts = AV_NOPTS_VALUE;
2556 ist->next_dts = AV_NOPTS_VALUE;
// Return the InputStream feeding this output stream, if it is mapped
// directly to one (source_index >= 0).
// NOTE(review): elided listing — the fall-through return for unmapped
// streams (presumably NULL, e.g. complex-filtergraph outputs) is not
// visible here.
2561 static InputStream *get_input_stream(OutputStream *ost)
2563 if (ost->source_index >= 0)
2564 return input_streams[ost->source_index];
/**
 * qsort()-style comparator ordering int64_t values ascending.
 *
 * Returns a negative value, zero, or a positive value when *a is less
 * than, equal to, or greater than *b respectively.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    if (lhs > rhs)
        return 1;
    return 0;
}
// Open the encoder for one output stream (when encoding is needed) and
// mirror the resulting codec parameters into the muxer-visible stream
// context. On failure a message is written into 'error' and a negative
// AVERROR is returned.
// NOTE(review): elided listing — 'ret'/'ist' declarations, several
// closing braces, the returns after error messages and the final
// 'return ret' are not visible here.
2574 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2578 if (ost->encoding_needed) {
2579 AVCodec *codec = ost->enc;
2580 AVCodecContext *dec = NULL;
2583 if ((ist = get_input_stream(ost)))
// Propagate the decoder's ASS subtitle header to the encoder.
2585 if (dec && dec->subtitle_header) {
2586 /* ASS code assumes this buffer is null terminated so add extra byte. */
2587 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2588 if (!ost->enc_ctx->subtitle_header)
2589 return AVERROR(ENOMEM);
2590 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2591 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
// Default to automatic threading unless the user picked a count.
2593 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2594 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
// Audio default bitrate of 128 kb/s when none was requested.
2595 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2597 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2598 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2599 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2601 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2602 if (ret == AVERROR_EXPERIMENTAL)
2603 abort_codec_experimental(codec, 1);
2604 snprintf(error, error_len,
2605 "Error while opening encoder for output stream #%d:%d - "
2606 "maybe incorrect parameters such as bit_rate, rate, width or height",
2607 ost->file_index, ost->index);
// Fixed-frame-size audio encoders need the buffersink to emit
// exactly frame_size samples per frame.
2610 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2611 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2612 av_buffersink_set_frame_size(ost->filter->filter,
2613 ost->enc_ctx->frame_size);
2614 assert_avoptions(ost->encoder_opts);
// A bitrate below 1000 almost certainly means the user gave kbit/s.
2615 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2616 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2617 " It takes bits/s as argument, not kbits/s\n");
// Mirror the opened encoder context into the muxer-visible st->codec.
2619 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2621 av_log(NULL, AV_LOG_FATAL,
2622 "Error initializing the output stream codec context.\n");
2626 // copy timebase while removing common factors
2627 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2628 ost->st->codec->codec= ost->enc_ctx->codec;
// Stream-copy branch (no encoder): just apply the user options.
2630 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2632 av_log(NULL, AV_LOG_FATAL,
2633 "Error setting up codec context options.\n");
2636 // copy timebase while removing common factors
2637 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
// Parse the -force_key_frames specification 'kf' (a comma-separated list
// of times, where an entry "chapters[+offset]" expands to all chapter
// start times of the output file) into a sorted array of keyframe pts,
// stored on ost->forced_kf_pts / ost->forced_kf_count.
// NOTE(review): elided listing — declarations of p/t/pts, the counting
// of commas into n, 'size = n', the exit on allocation failure and the
// advance of p past each ',' are not visible here. kf is consumed
// destructively via strchr/NUL-splitting per the visible code.
2643 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2644 AVCodecContext *avctx)
2647 int n = 1, i, size, index = 0;
// Count entries: one more than the number of commas.
2650 for (p = kf; *p; p++)
2654 pts = av_malloc_array(size, sizeof(*pts));
2656 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2661 for (i = 0; i < n; i++) {
2662 char *next = strchr(p, ',');
// "chapters[+offset]": one keyframe per chapter start.
2667 if (!memcmp(p, "chapters", 8)) {
2669 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Grow the array by nb_chapters - 1 (the "chapters" entry itself
// already accounts for one slot); guard against int overflow.
2672 if (avf->nb_chapters > INT_MAX - size ||
2673 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2675 av_log(NULL, AV_LOG_FATAL,
2676 "Could not allocate forced key frames array.\n");
// Optional "+offset" after "chapters" shifts every chapter time.
2679 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2680 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2682 for (j = 0; j < avf->nb_chapters; j++) {
2683 AVChapter *c = avf->chapters[j];
2684 av_assert1(index < size);
2685 pts[index++] = av_rescale_q(c->start, c->time_base,
2686 avctx->time_base) + t;
// Plain time entry: parse and rescale into the encoder timebase.
2691 t = parse_time_or_die("force_key_frames", p, 1);
2692 av_assert1(index < size);
2693 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2700 av_assert0(index == size);
// Keep the pts sorted so the encoder loop can scan them in order.
2701 qsort(pts, size, sizeof(*pts), compare_int64);
2702 ost->forced_kf_count = size;
2703 ost->forced_kf_pts = pts;
// Warn (once per stream index) when a packet arrives for a stream that
// appeared after the input was opened; nb_streams_warn tracks the
// highest index already reported.
2706 static void report_new_stream(int input_index, AVPacket *pkt)
2708 InputFile *file = input_files[input_index];
2709 AVStream *st = file->ctx->streams[pkt->stream_index];
// Already warned about this stream (or one with a higher index).
2711 if (pkt->stream_index < file->nb_streams_warn)
2713 av_log(file->ctx, AV_LOG_WARNING,
2714 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2715 av_get_media_type_string(st->codec->codec_type),
2716 input_index, pkt->stream_index,
2717 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2718 file->nb_streams_warn = pkt->stream_index + 1;
// Set the "encoder" metadata tag on the output stream to identify the
// encoder used (e.g. "Lavc... libx264"), unless the user already set
// one. In bitexact mode (fflags or codec flags) the version-bearing
// LIBAVCODEC_IDENT is replaced by the stable string "Lavc".
// NOTE(review): elided listing — early returns, NULL checks on the
// av_opt_find results and some closing braces are not visible here.
2721 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2723 AVDictionaryEntry *e;
2725 uint8_t *encoder_string;
2726 int encoder_string_len;
2727 int format_flags = 0;
2728 int codec_flags = 0;
// User-provided "encoder" tag wins; do not overwrite it.
2730 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate the muxer's fflags to detect +bitexact.
2733 e = av_dict_get(of->opts, "fflags", NULL, 0);
2735 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2738 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
// Likewise for the encoder's own flags option.
2740 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2742 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2745 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
// sizeof(LIBAVCODEC_IDENT) includes the NUL; +2 covers ' ' + name.
2748 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2749 encoder_string = av_mallocz(encoder_string_len);
2750 if (!encoder_string)
2753 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2754 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2756 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2757 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL transfers ownership of encoder_string to the dict.
2758 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2759 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// One-time initialization of the whole transcode run: map complex
// filtergraph outputs to source streams, compute encoding/stream-copy
// parameters for every output stream, open all encoders and decoders,
// discard unused programs, write all output file headers, and print the
// stream mapping. Returns 0 on success or a negative AVERROR.
// NOTE(review): this is a heavily elided listing — many declarations,
// braces, 'continue'/'break'/'goto dump_format' statements and error
// returns are missing; comments below describe only the visible code.
2762 static int transcode_init(void)
2764 int ret = 0, i, j, k;
2765 AVFormatContext *oc;
2768 char error[1024] = {0};
// Give complex-filtergraph outputs a source_index so that downstream
// code that keys off a source stream keeps working (single-input
// graphs only, per the visible nb_inputs check).
2771 for (i = 0; i < nb_filtergraphs; i++) {
2772 FilterGraph *fg = filtergraphs[i];
2773 for (j = 0; j < fg->nb_outputs; j++) {
2774 OutputFilter *ofilter = fg->outputs[j];
2775 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2777 if (fg->nb_inputs != 1)
2779 for (k = nb_input_streams-1; k >= 0 ; k--)
2780 if (fg->inputs[0]->ist == input_streams[k])
2782 ofilter->ost->source_index = k;
2786 /* init framerate emulation */
2787 for (i = 0; i < nb_input_files; i++) {
2788 InputFile *ifile = input_files[i];
2789 if (ifile->rate_emu)
2790 for (j = 0; j < ifile->nb_streams; j++)
2791 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2794 /* for each output stream, we compute the right encoding parameters */
2795 for (i = 0; i < nb_output_streams; i++) {
2796 AVCodecContext *enc_ctx;
2797 AVCodecContext *dec_ctx = NULL;
2798 ost = output_streams[i];
2799 oc = output_files[ost->file_index]->ctx;
2800 ist = get_input_stream(ost);
2802 if (ost->attachment_filename)
// Stream copy writes into the muxer-visible st->codec directly.
2805 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2808 dec_ctx = ist->dec_ctx;
2810 ost->st->disposition = ist->st->disposition;
2811 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2812 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its media type in the output file,
// mark it as the default one.
2814 for (j=0; j<oc->nb_streams; j++) {
2815 AVStream *st = oc->streams[j];
2816 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2819 if (j == oc->nb_streams)
2820 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2821 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// ---- stream copy: clone codec parameters from the decoder side ----
2824 if (ost->stream_copy) {
2826 uint64_t extra_size;
2828 av_assert0(ist && !ost->filter);
2830 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2832 if (extra_size > INT_MAX) {
2833 return AVERROR(EINVAL);
2836 /* if stream_copy is selected, no need to decode or encode */
2837 enc_ctx->codec_id = dec_ctx->codec_id;
2838 enc_ctx->codec_type = dec_ctx->codec_type;
// Keep the input codec_tag only if the output container accepts it.
2840 if (!enc_ctx->codec_tag) {
2841 unsigned int codec_tag;
2842 if (!oc->oformat->codec_tag ||
2843 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2844 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2845 enc_ctx->codec_tag = dec_ctx->codec_tag;
2848 enc_ctx->bit_rate = dec_ctx->bit_rate;
2849 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2850 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2851 enc_ctx->field_order = dec_ctx->field_order;
2852 if (dec_ctx->extradata_size) {
2853 enc_ctx->extradata = av_mallocz(extra_size);
2854 if (!enc_ctx->extradata) {
2855 return AVERROR(ENOMEM);
2857 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2859 enc_ctx->extradata_size= dec_ctx->extradata_size;
2860 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2862 enc_ctx->time_base = ist->st->time_base;
// Container-specific timebase fixups when -copytb was not forced.
2864 * Avi is a special case here because it supports variable fps but
2865 * having the fps and timebase differe significantly adds quite some
2868 if(!strcmp(oc->oformat->name, "avi")) {
2869 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2870 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2871 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2872 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2874 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2875 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2876 enc_ctx->ticks_per_frame = 2;
2877 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2878 && av_q2d(ist->st->time_base) < 1.0/500
2880 enc_ctx->time_base = dec_ctx->time_base;
2881 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2882 enc_ctx->time_base.den *= 2;
2883 enc_ctx->ticks_per_frame = 2;
2885 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2886 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2887 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2888 && strcmp(oc->oformat->name, "f4v")
2890 if( copy_tb<0 && dec_ctx->time_base.den
2891 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2892 && av_q2d(ist->st->time_base) < 1.0/500
2894 enc_ctx->time_base = dec_ctx->time_base;
2895 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
// Timecode ("tmcd") tracks keep the decoder timebase when it looks
// like a sane frame duration (num/den between ~1/121 and 1).
2898 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2899 && dec_ctx->time_base.num < dec_ctx->time_base.den
2900 && dec_ctx->time_base.num > 0
2901 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2902 enc_ctx->time_base = dec_ctx->time_base;
2905 if (!ost->frame_rate.num)
2906 ost->frame_rate = ist->framerate;
2907 if(ost->frame_rate.num)
2908 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2910 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2911 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
// Copy stream-level side data (except a display matrix that the
// user overrode with -metadata:s rotate / autorotate handling).
2913 if (ist->st->nb_side_data) {
2914 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2915 sizeof(*ist->st->side_data));
2916 if (!ost->st->side_data)
2917 return AVERROR(ENOMEM);
2919 ost->st->nb_side_data = 0;
2920 for (j = 0; j < ist->st->nb_side_data; j++) {
2921 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2922 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2924 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2927 sd_dst->data = av_malloc(sd_src->size);
2929 return AVERROR(ENOMEM);
2930 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2931 sd_dst->size = sd_src->size;
2932 sd_dst->type = sd_src->type;
2933 ost->st->nb_side_data++;
2937 ost->parser = av_parser_init(enc_ctx->codec_id);
// Per-media-type parameters that must be copied for stream copy.
2939 switch (enc_ctx->codec_type) {
2940 case AVMEDIA_TYPE_AUDIO:
2941 if (audio_volume != 256) {
2942 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2945 enc_ctx->channel_layout = dec_ctx->channel_layout;
2946 enc_ctx->sample_rate = dec_ctx->sample_rate;
2947 enc_ctx->channels = dec_ctx->channels;
2948 enc_ctx->frame_size = dec_ctx->frame_size;
2949 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2950 enc_ctx->block_align = dec_ctx->block_align;
2951 enc_ctx->initial_padding = dec_ctx->delay;
2952 #if FF_API_AUDIOENC_DELAY
2953 enc_ctx->delay = dec_ctx->delay;
// MP3/AC3 block_align values that would confuse muxers are reset.
2955 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2956 enc_ctx->block_align= 0;
2957 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2958 enc_ctx->block_align= 0;
2960 case AVMEDIA_TYPE_VIDEO:
2961 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2962 enc_ctx->width = dec_ctx->width;
2963 enc_ctx->height = dec_ctx->height;
2964 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2965 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2967 av_mul_q(ost->frame_aspect_ratio,
2968 (AVRational){ enc_ctx->height, enc_ctx->width });
2969 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2970 "with stream copy may produce invalid files\n");
2972 else if (ist->st->sample_aspect_ratio.num)
2973 sar = ist->st->sample_aspect_ratio;
2975 sar = dec_ctx->sample_aspect_ratio;
2976 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2977 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2978 ost->st->r_frame_rate = ist->st->r_frame_rate;
2980 case AVMEDIA_TYPE_SUBTITLE:
2981 enc_ctx->width = dec_ctx->width;
2982 enc_ctx->height = dec_ctx->height;
2984 case AVMEDIA_TYPE_UNKNOWN:
2985 case AVMEDIA_TYPE_DATA:
2986 case AVMEDIA_TYPE_ATTACHMENT:
// ---- encoding branch: pick the encoder and set up a filtergraph ----
2993 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2995 /* should only happen when a default codec is not present. */
2996 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2997 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2998 ret = AVERROR(EINVAL);
3002 set_encoder_id(output_files[ost->file_index], ost);
// Audio/video encoders always pull from a (possibly trivial) filter
// graph; build a simple one when none was configured.
3005 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3006 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3008 fg = init_simple_filtergraph(ist, ost);
3009 if (configure_filtergraph(fg)) {
3010 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
// Output frame rate: user option > filter graph > input metadata,
// falling back to a default of 25 fps with a warning.
3015 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3016 if (!ost->frame_rate.num)
3017 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3018 if (ist && !ost->frame_rate.num)
3019 ost->frame_rate = ist->framerate;
3020 if (ist && !ost->frame_rate.num)
3021 ost->frame_rate = ist->st->r_frame_rate;
3022 if (ist && !ost->frame_rate.num) {
3023 ost->frame_rate = (AVRational){25, 1};
3024 av_log(NULL, AV_LOG_WARNING,
3026 "about the input framerate is available. Falling "
3027 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3028 "if you want a different framerate.\n",
3029 ost->file_index, ost->index);
3031 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3032 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3033 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3034 ost->frame_rate = ost->enc->supported_framerates[idx];
3036 // reduce frame rate for mpeg4 to be within the spec limits
3037 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3038 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3039 ost->frame_rate.num, ost->frame_rate.den, 65535);
// Per-media-type encoder parameters taken from the filter output.
3043 switch (enc_ctx->codec_type) {
3044 case AVMEDIA_TYPE_AUDIO:
3045 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3046 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3047 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3048 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3049 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3051 case AVMEDIA_TYPE_VIDEO:
3052 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3053 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3054 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3055 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3056 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3057 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3058 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Forced keyframe times were parsed in AV_TIME_BASE; rescale into
// the (now final) encoder timebase.
3060 for (j = 0; j < ost->forced_kf_count; j++)
3061 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3063 enc_ctx->time_base);
3065 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3066 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3067 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3068 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3069 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3070 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3071 if (!strncmp(ost->enc->name, "libx264", 7) &&
3072 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3073 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3074 av_log(NULL, AV_LOG_WARNING,
3075 "No pixel format specified, %s for H.264 encoding chosen.\n"
3076 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3077 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3078 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3079 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3080 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3081 av_log(NULL, AV_LOG_WARNING,
3082 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3083 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3084 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3085 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3087 ost->st->avg_frame_rate = ost->frame_rate;
// Raw-sample bit depth only carries over when geometry/format match.
3090 enc_ctx->width  != dec_ctx->width  ||
3091 enc_ctx->height != dec_ctx->height ||
3092 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3093 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// -force_key_frames: "expr:" is evaluated per frame, "source" keeps
// input keyframes, anything else is a static time list.
3096 if (ost->forced_keyframes) {
3097 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3098 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3099 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3101 av_log(NULL, AV_LOG_ERROR,
3102 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3105 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3106 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3107 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3108 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3110 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3111 // parse it only for static kf timings
3112 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3113 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3117 case AVMEDIA_TYPE_SUBTITLE:
3118 enc_ctx->time_base = (AVRational){1, 1000};
3119 if (!enc_ctx->width) {
3120 enc_ctx->width  = input_streams[ost->source_index]->st->codec->width;
3121 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3124 case AVMEDIA_TYPE_DATA:
// Parse the -disposition flag string via a throwaway AVOption table.
3132 if (ost->disposition) {
3133 static const AVOption opts[] = {
3134 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3135 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3136 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3137 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3138 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3139 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3140 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3141 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3142 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3143 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3144 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3145 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3146 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3147 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3150 static const AVClass class = {
3152 .item_name  = av_default_item_name,
3154 .version    = LIBAVUTIL_VERSION_INT,
3156 const AVClass *pclass = &class;
3158 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3164 /* open each encoder */
3165 for (i = 0; i < nb_output_streams; i++) {
3166 ret = init_output_stream(output_streams[i], error, sizeof(error));
3171 /* init input streams */
3172 for (i = 0; i < nb_input_streams; i++)
3173 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
// A decoder failed to open: close the already-opened encoders.
3174 for (i = 0; i < nb_output_streams; i++) {
3175 ost = output_streams[i];
3176 avcodec_close(ost->enc_ctx);
3181 /* discard unused programs */
3182 for (i = 0; i < nb_input_files; i++) {
3183 InputFile *ifile = input_files[i];
3184 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3185 AVProgram *p = ifile->ctx->programs[j];
3186 int discard  = AVDISCARD_ALL;
// Keep the program if any of its streams is actually used.
3188 for (k = 0; k < p->nb_stream_indexes; k++)
3189 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3190 discard = AVDISCARD_DEFAULT;
3193 p->discard = discard;
3197 /* open files and write file headers */
3198 for (i = 0; i < nb_output_files; i++) {
3199 oc = output_files[i]->ctx;
3200 oc->interrupt_callback = int_cb;
3201 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3202 snprintf(error, sizeof(error),
3203 "Could not write header for output file #%d "
3204 "(incorrect codec parameters ?): %s",
3205 i, av_err2str(ret));
3206 ret = AVERROR(EINVAL);
3209 //         assert_avoptions(output_files[i]->opts);
3210 if (strcmp(oc->oformat->name, "rtp")) {
3216 /* dump the file output parameters - cannot be done before in case
3218 for (i = 0; i < nb_output_files; i++) {
3219 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3222 /* dump the stream mapping */
3223 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3224 for (i = 0; i < nb_input_streams; i++) {
3225 ist = input_streams[i];
3227 for (j = 0; j < ist->nb_filters; j++) {
3228 if (ist->filters[j]->graph->graph_desc) {
3229 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3230 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3231 ist->filters[j]->name);
3232 if (nb_filtergraphs > 1)
3233 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3234 av_log(NULL, AV_LOG_INFO, "\n");
3239 for (i = 0; i < nb_output_streams; i++) {
3240 ost = output_streams[i];
3242 if (ost->attachment_filename) {
3243 /* an attached file */
3244 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3245 ost->attachment_filename, ost->file_index, ost->index);
3249 if (ost->filter && ost->filter->graph->graph_desc) {
3250 /* output from a complex graph */
3251 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3252 if (nb_filtergraphs > 1)
3253 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3255 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3256 ost->index, ost->enc ? ost->enc->name : "?");
// Plain input->output mapping line, with decoder/encoder names when
// they differ from the codec descriptors.
3260 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3261 input_streams[ost->source_index]->file_index,
3262 input_streams[ost->source_index]->st->index,
3265 if (ost->sync_ist != input_streams[ost->source_index])
3266 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3267 ost->sync_ist->file_index,
3268 ost->sync_ist->st->index);
3269 if (ost->stream_copy)
3270 av_log(NULL, AV_LOG_INFO, " (copy)");
3272 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3273 const AVCodec *out_codec   = ost->enc;
3274 const char *decoder_name   = "?";
3275 const char *in_codec_name  = "?";
3276 const char *encoder_name   = "?";
3277 const char *out_codec_name = "?";
3278 const AVCodecDescriptor *desc;
3281 decoder_name = in_codec->name;
3282 desc = avcodec_descriptor_get(in_codec->id);
3284 in_codec_name = desc->name;
// "native" marks the built-in implementation of the codec.
3285 if (!strcmp(decoder_name, in_codec_name))
3286 decoder_name = "native";
3290 encoder_name = out_codec->name;
3291 desc = avcodec_descriptor_get(out_codec->id);
3293 out_codec_name = desc->name;
3294 if (!strcmp(encoder_name, out_codec_name))
3295 encoder_name = "native";
3298 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3299 in_codec_name, decoder_name,
3300 out_codec_name, encoder_name);
3302 av_log(NULL, AV_LOG_INFO, "\n");
// Shared error exit: 'error' was filled by whichever step failed.
3306 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3310 if (sdp_filename || want_sdp) {
3314 transcode_init_done = 1;
3319 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
// A stream no longer wants output when it is finished, its file hit
// -fs (limit_filesize), or it reached -frames (max_frames) — in which
// case every stream of that file is closed.
// NOTE(review): elided listing — the 'continue' after the size check,
// the 'return 1' for an active stream and the final 'return 0' are not
// visible here.
3320 static int need_output(void)
3324 for (i = 0; i < nb_output_streams; i++) {
3325 OutputStream *ost    = output_streams[i];
3326 OutputFile *of       = output_files[ost->file_index];
3327 AVFormatContext *os  = output_files[ost->file_index]->ctx;
3329 if (ost->finished ||
3330 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3332 if (ost->frame_number >= ost->max_frames) {
3334 for (j = 0; j < of->ctx->nb_streams; j++)
3335 close_output_stream(output_streams[of->ost_index + j]);
3346  * Select the output stream to process.
// Picks the unfinished output stream with the smallest current dts
// (rescaled to a common timebase), so muxing advances the stream that
// is furthest behind. An "unavailable" stream yields NULL to signal
// the caller to try again later.
3348  * @return selected output stream, or NULL if none available
3350 static OutputStream *choose_output(void)
3353 int64_t opts_min = INT64_MAX;
3354 OutputStream *ost_min = NULL;
3356 for (i = 0; i < nb_output_streams; i++) {
3357 OutputStream *ost = output_streams[i];
3358 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3360 if (!ost->finished && opts < opts_min) {
3362 ost_min = ost->unavailable ? NULL : ost;
/* Poll the terminal for a single key press and act on it: quit, verbosity
 * up/down, QP histogram toggle, packet/hex dump cycling, interactive
 * filtergraph commands ('c'/'C'), codec debug modes ('d'/'D') and help.
 * Returns AVERROR_EXIT when termination was requested, otherwise continues.
 * NOTE(review): many source lines are elided from this view; only the
 * visible statements are documented. */
3368 static int check_keyboard_interaction(int64_t cur_time)
3371 static int64_t last_time;
3372 if (received_nb_signals)
3373 return AVERROR_EXIT;
3374 /* read_key() returns 0 on EOF */
/* rate-limit keyboard polling to once per 100 ms; never poll when
 * running as a daemon */
3375 if(cur_time - last_time >= 100000 && !run_as_daemon){
3377 last_time = cur_time;
3381 return AVERROR_EXIT;
3382 if (key == '+') av_log_set_level(av_log_get_level()+10);
3383 if (key == '-') av_log_set_level(av_log_get_level()-10);
3384 if (key == 's') qp_hist ^= 1;
3387 do_hex_dump = do_pkt_dump = 0;
3388 } else if(do_pkt_dump){
3392 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<arg>]" line from the
 * terminal and forward it to matching filters; 'c' targets only the
 * first matching filter, 'C' targets all of them */
3394 if (key == 'c' || key == 'C'){
3395 char buf[4096], target[64], command[256], arg[256] = {0};
3398 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3400 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3405 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3406 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3407 target, time, command, arg);
3408 for (i = 0; i < nb_filtergraphs; i++) {
3409 FilterGraph *fg = filtergraphs[i];
3412 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3413 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3414 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3415 } else if (key == 'c') {
3416 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3417 ret = AVERROR_PATCHWELCOME;
/* time != -1 presumably queues the command for later execution — the
 * enclosing condition is on an elided line, confirm against full source */
3419 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3421 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3426 av_log(NULL, AV_LOG_ERROR,
3427 "Parse error, at least 3 arguments were expected, "
3428 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle the global codec debug bitmask; 'D' presumably prompts for a
 * numeric value via the read_key() loop below — elided lines, confirm */
3431 if (key == 'd' || key == 'D'){
3434 debug = input_streams[0]->st->codec->debug<<1;
3435 if(!debug) debug = 1;
3436 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3442 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3446 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3447 fprintf(stderr,"error parsing debug value\n");
/* apply the new debug mask to every decoder and encoder context */
3449 for(i=0;i<nb_input_streams;i++) {
3450 input_streams[i]->st->codec->debug = debug;
3452 for(i=0;i<nb_output_streams;i++) {
3453 OutputStream *ost = output_streams[i];
3454 ost->enc_ctx->debug = debug;
3456 if(debug) av_log_set_level(AV_LOG_DEBUG);
3457 fprintf(stderr,"debug=%d\n", debug);
/* help text printed for unrecognized keys / '?' */
3460 fprintf(stderr, "key function\n"
3461 "? show this help\n"
3462 "+ increase verbosity\n"
3463 "- decrease verbosity\n"
3464 "c Send command to first matching filter supporting it\n"
3465 "C Send/Que command to all matching filters\n"
3466 "D cycle through available debug modes\n"
3467 "h dump packets/hex press to cycle through the 3 states\n"
3469 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * hands them to the main thread through f->in_thread_queue. On read error
 * or EOF the error is propagated to the queue's receive side so the main
 * thread wakes up. NOTE(review): some lines are elided from this view. */
3476 static void *input_thread(void *arg)
3479 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3484 ret = av_read_frame(f->ctx, &pkt);
3486 if (ret == AVERROR(EAGAIN)) {
3491 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* duplicate the packet data so ownership can safely cross threads */
3494 av_dup_packet(&pkt);
3495 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* a non-blocking send that would block: warn the user once about a full
 * queue, then retry (flags presumably cleared to blocking mode on an
 * elided line — confirm against full source) */
3496 if (flags && ret == AVERROR(EAGAIN)) {
3498 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3499 av_log(f->ctx, AV_LOG_WARNING,
3500 "Thread message queue blocking; consider raising the "
3501 "thread_queue_size option (current value: %d)\n",
3502 f->thread_queue_size);
/* send failed for good: report (unless it is plain EOF), drop the packet
 * and propagate the error to the receiving side */
3505 if (ret != AVERROR_EOF)
3506 av_log(f->ctx, AV_LOG_ERROR,
3507 "Unable to send packet to main thread: %s\n",
3509 av_free_packet(&pkt);
3510 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap all demuxer threads: signal EOF on the send side, drain
 * and free any packets still queued, join the thread and free the queue.
 * NOTE(review): loop-closing lines are elided from this view. */
3518 static void free_input_threads(void)
3522 for (i = 0; i < nb_input_files; i++) {
3523 InputFile *f = input_files[i];
/* skip files that never started a thread (e.g. single-input runs) */
3526 if (!f || !f->in_thread_queue)
3528 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain leftover packets so their buffers are not leaked */
3529 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3530 av_free_packet(&pkt);
3532 pthread_join(f->thread, NULL);
3534 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file; skipped entirely when there is
 * only a single input (the main thread reads directly then).
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): some lines are elided from this view. */
3538 static int init_input_threads(void)
3542 if (nb_input_files == 1)
3545 for (i = 0; i < nb_input_files; i++) {
3546 InputFile *f = input_files[i];
/* non-seekable inputs (pipes, network) — except the lavfi pseudo-device —
 * must use non-blocking queue sends so a slow consumer cannot stall them */
3548 if (f->ctx->pb ? !f->ctx->pb->seekable :
3549 strcmp(f->ctx->iformat->name, "lavfi"))
3550 f->non_blocking = 1;
3551 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3552 f->thread_queue_size, sizeof(AVPacket));
/* pthread_create returns the error number directly (not via errno) */
3556 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3557 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3558 av_thread_message_queue_free(&f->in_thread_queue);
3559 return AVERROR(ret);
/* Receive the next packet produced by this file's demuxer thread.
 * NOTE(review): the condition selecting the NONBLOCK flag sits on an
 * elided line between the two shown — presumably f->non_blocking. */
3565 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3567 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3569 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file: either from its demuxer thread
 * (multi-input runs) or directly via av_read_frame().
 * NOTE(review): the condition enabling the throttling loop below is on an
 * elided line — presumably the -re / rate_emu option; confirm. */
3573 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* rate emulation: if any stream's DTS is ahead of the wall-clock time
 * elapsed since the stream started, back off with EAGAIN */
3577 for (i = 0; i < f->nb_streams; i++) {
3578 InputStream *ist = input_streams[f->ist_index + i];
3579 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3580 int64_t now = av_gettime_relative() - ist->start;
3582 return AVERROR(EAGAIN);
3587 if (nb_input_files > 1)
3588 return get_input_packet_mt(f, pkt);
3590 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input reported EAGAIN). Return statements are elided here. */
3593 static int got_eagain(void)
3596 for (i = 0; i < nb_output_streams; i++)
3597 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags so the next
 * scheduling round starts fresh. */
3602 static void reset_eagain(void)
3605 for (i = 0; i < nb_input_files; i++)
3606 input_files[i]->eagain = 0;
3607 for (i = 0; i < nb_output_streams; i++)
3608 output_streams[i]->unavailable = 0;
3611 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Cross-time-base max: compares *duration (in time_base) against tmp (in
 * tmp_time_base) with av_compare_ts() and stores the larger one back into
 * *duration, returning the time base the stored value is expressed in.
 * NOTE(review): the assignment branches are on elided lines. */
3612 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3613 AVRational time_base)
3619 return tmp_time_base;
3622 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3625 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to the start, flush all
 * decoders, and accumulate the file's total duration (used later to offset
 * the timestamps of the next iteration).
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): several lines are elided from this view. */
3631 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3634 AVCodecContext *avctx;
3635 int i, ret, has_audio = 0;
3636 int64_t duration = 0;
3638 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush every active decoder and detect whether the file has
 * a decodable audio stream with known sample counts */
3642 for (i = 0; i < ifile->nb_streams; i++) {
3643 ist = input_streams[ifile->ist_index + i];
3644 avctx = ist->dec_ctx;
3647 if (ist->decoding_needed) {
3648 process_input_packet(ist, NULL, 1);
3649 avcodec_flush_buffers(avctx);
3652 /* duration is the length of the last frame in a stream
3653 * when audio stream is present we don't care about
3654 * last video frame length because it's not defined exactly */
3655 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute the last-frame duration per stream and fold the
 * per-stream totals into ifile->duration via duration_max() */
3659 for (i = 0; i < ifile->nb_streams; i++) {
3660 ist = input_streams[ifile->ist_index + i];
3661 avctx = ist->dec_ctx;
3664 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3665 AVRational sample_rate = {1, avctx->sample_rate};
3667 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video path: prefer the user-forced framerate, then the measured average
 * frame rate, falling back to 1 time-base unit */
3671 if (ist->framerate.num) {
3672 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3673 } else if (ist->st->avg_frame_rate.num) {
3674 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3675 } else duration = 1;
3677 if (!ifile->duration)
3678 ifile->time_base = ist->st->time_base;
3679 /* the total duration of the stream, max_pts - min_pts is
3680 * the duration of the stream without the last frame */
3681 duration += ist->max_pts - ist->min_pts;
3682 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* NOTE(review): the doxygen opener of this function is elided; the visible
 * tail of its comment documents the return values. Many body lines are
 * elided throughout — only visible statements are documented below. */
3693 * - 0 -- one packet was read and processed
3694 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3695 * this function should be called again
3696 * - AVERROR_EOF -- this function should not be called again
3698 static int process_input(int file_index)
3700 InputFile *ifile = input_files[file_index];
3701 AVFormatContext *is;
3708 ret = get_input_packet(ifile, &pkt);
3710 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure with loops remaining, rewind the file and
 * try reading once more */
3714 if ((ret < 0) && (ifile->loop > 1)) {
3715 if ((ret = seek_to_start(ifile, is)) < 0)
3717 ret = get_input_packet(ifile, &pkt);
3720 if (ret != AVERROR_EOF) {
3721 print_error(is->filename, ret);
/* EOF handling: flush every decoder of this file and finish the outputs
 * that are fed directly (streamcopy / subtitles, i.e. not through lavfi) */
3726 for (i = 0; i < ifile->nb_streams; i++) {
3727 ist = input_streams[ifile->ist_index + i];
3728 if (ist->decoding_needed) {
3729 ret = process_input_packet(ist, NULL, 0);
3734 /* mark all outputs that don't go through lavfi as finished */
3735 for (j = 0; j < nb_output_streams; j++) {
3736 OutputStream *ost = output_streams[j];
3738 if (ost->source_index == ifile->ist_index + i &&
3739 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3740 finish_output_stream(ost);
/* EOF is reported to the caller as EAGAIN so the scheduler moves on to
 * other files instead of aborting */
3744 ifile->eof_reached = 1;
3745 return AVERROR(EAGAIN);
3751 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3752 is->streams[pkt.stream_index]);
3754 /* the following test is needed in case new streams appear
3755 dynamically in stream : we ignore them */
3756 if (pkt.stream_index >= ifile->nb_streams) {
3757 report_new_stream(file_index, &pkt);
3758 goto discard_packet;
3761 ist = input_streams[ifile->ist_index + pkt.stream_index];
3763 ist->data_size += pkt.size;
/* the discard condition is on an elided line — presumably ist->discard */
3767 goto discard_packet;
3770 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3771 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3772 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3773 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3774 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3775 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3776 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3777 av_ts2str(input_files[ist->file_index]->ts_offset),
3778 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wraparound correction for containers with < 64-bit timestamps
 * (e.g. MPEG-TS 33-bit PTS/DTS) */
3781 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3782 int64_t stime, stime2;
3783 // Correcting starttime based on the enabled streams
3784 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3785 // so we instead do it here as part of discontinuity handling
3786 if ( ist->next_dts == AV_NOPTS_VALUE
3787 && ifile->ts_offset == -is->start_time
3788 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3789 int64_t new_start_time = INT64_MAX;
3790 for (i=0; i<is->nb_streams; i++) {
3791 AVStream *st = is->streams[i];
3792 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3794 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3796 if (new_start_time > is->start_time) {
3797 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3798 ifile->ts_offset = -new_start_time;
3802 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3803 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3804 ist->wrap_correction_done = 1;
/* if the packet timestamp sits in the upper half of the wrap range, undo
 * one wrap period and retry the correction on the next packet */
3806 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3807 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3808 ist->wrap_correction_done = 0;
3810 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3811 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3812 ist->wrap_correction_done = 0;
3816 /* add the stream-global side data to the first packet */
3817 if (ist->nb_packets == 1) {
3818 if (ist->st->nb_side_data)
3819 av_packet_split_side_data(&pkt);
3820 for (i = 0; i < ist->st->nb_side_data; i++) {
3821 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* skip side data the packet already carries, and display-matrix data when
 * autorotation will consume it instead */
3824 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3826 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3829 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3833 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the user's -itsscale factor */
3837 if (pkt.dts != AV_NOPTS_VALUE)
3838 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3839 if (pkt.pts != AV_NOPTS_VALUE)
3840 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3842 if (pkt.pts != AV_NOPTS_VALUE)
3843 pkt.pts *= ist->ts_scale;
3844 if (pkt.dts != AV_NOPTS_VALUE)
3845 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check: the very first DTS of this stream is
 * compared against the file's last seen timestamp */
3847 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3848 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3849 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3850 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3851 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3852 int64_t delta = pkt_dts - ifile->last_ts;
3853 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3854 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3855 ifile->ts_offset -= delta;
3856 av_log(NULL, AV_LOG_DEBUG,
3857 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3858 delta, ifile->ts_offset);
3859 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3860 if (pkt.pts != AV_NOPTS_VALUE)
3861 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps forward by the accumulated file duration
 * and track the observed pts range for the next loop iteration */
3865 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3866 if (pkt.pts != AV_NOPTS_VALUE) {
3867 pkt.pts += duration;
3868 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3869 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3872 if (pkt.dts != AV_NOPTS_VALUE)
3873 pkt.dts += duration;
/* intra-stream discontinuity check against the predicted next DTS;
 * part of the enclosing condition continues on an elided line */
3875 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3876 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3877 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3879 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3880 int64_t delta = pkt_dts - ist->next_dts;
3881 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3882 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3883 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3884 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3885 ifile->ts_offset -= delta;
3886 av_log(NULL, AV_LOG_DEBUG,
3887 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3888 delta, ifile->ts_offset);
3889 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3890 if (pkt.pts != AV_NOPTS_VALUE)
3891 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: timestamps beyond dts_error_threshold are dropped
 * rather than re-offset */
3894 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3895 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3896 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3897 pkt.dts = AV_NOPTS_VALUE;
3899 if (pkt.pts != AV_NOPTS_VALUE){
3900 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3901 delta = pkt_pts - ist->next_dts;
3902 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3903 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3904 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3905 pkt.pts = AV_NOPTS_VALUE;
/* remember the last DTS of this file for the inter-stream check above */
3911 if (pkt.dts != AV_NOPTS_VALUE)
3912 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3915 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3916 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3917 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3918 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3919 av_ts2str(input_files[ist->file_index]->ts_offset),
3920 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video subtitle rendering in sync, then hand the packet to the
 * decode/streamcopy pipeline; the packet is always freed afterwards */
3923 sub2video_heartbeat(ist, pkt.pts);
3925 process_input_packet(ist, &pkt, 0);
3928 av_free_packet(&pkt);
/* NOTE(review): the doxygen opener is elided; several body lines are also
 * missing from this view. */
3934 * Perform a step of transcoding for the specified filter graph.
3936 * @param[in] graph filter graph to consider
3937 * @param[out] best_ist input stream where a frame would allow to continue
3938 * @return 0 for success, <0 for error
3940 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3943 int nb_requests, nb_requests_max = 0;
3944 InputFilter *ifilter;
/* ask the graph to produce output on its oldest-needing sink */
3948 ret = avfilter_graph_request_oldest(graph->graph);
3950 return reap_filters(0);
/* graph finished: drain the sinks and close all fed output streams */
3952 if (ret == AVERROR_EOF) {
3953 ret = reap_filters(1);
3954 for (i = 0; i < graph->nb_outputs; i++)
3955 close_output_stream(graph->outputs[i]->ost);
3958 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc has starved the most requests as
 * the best stream to feed next, skipping files that are stalled or done */
3961 for (i = 0; i < graph->nb_inputs; i++) {
3962 ifilter = graph->inputs[i];
3964 if (input_files[ist->file_index]->eagain ||
3965 input_files[ist->file_index]->eof_reached)
3967 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3968 if (nb_requests > nb_requests_max) {
3969 nb_requests_max = nb_requests;
/* no feedable input found: mark this graph's outputs unavailable so the
 * scheduler tries other streams */
3975 for (i = 0; i < graph->nb_outputs; i++)
3976 graph->outputs[i]->ost->unavailable = 1;
/* NOTE(review): doxygen opener and several body lines are elided here. */
3982 * Run a single step of transcoding.
3984 * @return 0 for success, <0 for error
3986 static int transcode_step(void)
/* choose the stream to advance; elided lines handle the NULL case */
3992 ost = choose_output();
3999 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtered streams: let the filtergraph tell us which input to feed */
4004 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4009 av_assert0(ost->source_index >= 0);
4010 ist = input_streams[ost->source_index];
4013 ret = process_input(ist->file_index);
/* EAGAIN from the chosen file makes this output temporarily unavailable */
4014 if (ret == AVERROR(EAGAIN)) {
4015 if (input_files[ist->file_index]->eagain)
4016 ost->unavailable = 1;
/* EOF of one input is not an error for the overall step */
4021 return ret == AVERROR_EOF ? 0 : ret;
4023 return reap_filters(0);
/* NOTE(review): comment opener, loop-exit lines and the cleanup label are
 * elided from this view; only visible statements are documented. */
4027 * The following code is the main loop of the file converter
4029 static int transcode(void)
4032 AVFormatContext *os;
4035 int64_t timer_start;
4037 ret = transcode_init();
4041 if (stdin_interaction) {
4042 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4045 timer_start = av_gettime_relative();
/* demuxer threads only exist under HAVE_PTHREADS builds (guard elided) */
4048 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until SIGTERM, 'q',
 * or no stream needs further output */
4052 while (!received_sigterm) {
4053 int64_t cur_time= av_gettime_relative();
4055 /* if 'q' pressed, exits */
4056 if (stdin_interaction)
4057 if (check_keyboard_interaction(cur_time) < 0)
4060 /* check if there's any stream where output is still needed */
4061 if (!need_output()) {
4062 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4066 ret = transcode_step();
4068 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4072 av_strerror(ret, errbuf, sizeof(errbuf));
4074 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4079 /* dump report by using the output first video and audio streams */
4080 print_report(0, timer_start, cur_time);
4083 free_input_threads();
4086 /* at the end of stream, we must flush the decoder buffers */
4087 for (i = 0; i < nb_input_streams; i++) {
4088 ist = input_streams[i];
4089 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4090 process_input_packet(ist, NULL, 0);
4097 /* write the trailer if needed and close file */
4098 for (i = 0; i < nb_output_files; i++) {
4099 os = output_files[i]->ctx;
4100 av_write_trailer(os);
4103 /* dump report by using the first video and audio streams */
4104 print_report(1, timer_start, av_gettime_relative());
4106 /* close each encoder */
4107 for (i = 0; i < nb_output_streams; i++) {
4108 ost = output_streams[i];
4109 if (ost->encoding_needed) {
4110 av_freep(&ost->enc_ctx->stats_in);
4114 /* close each decoder */
4115 for (i = 0; i < nb_input_streams; i++) {
4116 ist = input_streams[i];
4117 if (ist->decoding_needed) {
4118 avcodec_close(ist->dec_ctx);
4119 if (ist->hwaccel_uninit)
4120 ist->hwaccel_uninit(ist->dec_ctx);
/* error-path cleanup (the fail: label itself is on an elided line) */
4129 free_input_threads();
4132 if (output_streams) {
4133 for (i = 0; i < nb_output_streams; i++) {
4134 ost = output_streams[i];
/* close the two-pass log file if one was opened for this stream */
4137 fclose(ost->logfile);
4138 ost->logfile = NULL;
4140 av_freep(&ost->forced_kf_pts);
4141 av_freep(&ost->apad);
4142 av_freep(&ost->disposition);
4143 av_dict_free(&ost->encoder_opts);
4144 av_dict_free(&ost->sws_dict);
4145 av_dict_free(&ost->swr_opts);
4146 av_dict_free(&ost->resample_opts);
4147 av_dict_free(&ost->bsf_args);
/* CPU user time consumed by this process, in microseconds.
 * Uses getrusage() where available (the #if guard is on an elided line),
 * GetProcessTimes() on Windows, and falls back to wall-clock time. */
4155 static int64_t getutime(void)
4158 struct rusage rusage;
4160 getrusage(RUSAGE_SELF, &rusage);
4161 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4162 #elif HAVE_GETPROCESSTIMES
4164 FILETIME c, e, k, u;
4165 proc = GetCurrentProcess();
4166 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100ns ticks; divide by 10 to get microseconds */
4167 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4169 return av_gettime_relative();
/* Peak memory usage of this process in bytes, or 0 when no platform API
 * is available (fallback return is on an elided line). */
4173 static int64_t getmaxrss(void)
4175 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4176 struct rusage rusage;
4177 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes */
4178 return (int64_t)rusage.ru_maxrss * 1024;
4179 #elif HAVE_GETPROCESSMEMORYINFO
4181 PROCESS_MEMORY_COUNTERS memcounters;
4182 proc = GetCurrentProcess();
4183 memcounters.cb = sizeof(memcounters);
4184 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4185 return memcounters.PeakPagefileUsage;
/* No-op log callback; installed by main() when "-d" (daemon mode) is the
 * first argument, to silence all libav* logging. Body elided from view. */
4191 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging, register all components, parse the
 * command line, run transcode(), optionally print benchmark stats, and
 * exit through exit_program().
 * NOTE(review): several lines (declarations, benchmark/option guards,
 * closing brace) are elided from this view. */
4195 int main(int argc, char **argv)
4200 register_exit(ffmpeg_cleanup);
4202 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4204 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4205 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode, suppress all log output */
4207 if(argc>1 && !strcmp(argv[1], "-d")){
4209 av_log_set_callback(log_callback_null);
/* register every codec, device, filter and (de)muxer before option
 * parsing so format/codec names resolve */
4214 avcodec_register_all();
4216 avdevice_register_all();
4218 avfilter_register_all();
4220 avformat_network_init();
4222 show_banner(argc, argv, options);
4226 /* parse options and open all input/output files */
4227 ret = ffmpeg_parse_options(argc, argv);
4231 if (nb_output_files <= 0 && nb_input_files == 0) {
4233 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4237 /* file converter / grab */
4238 if (nb_output_files <= 0) {
4239 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4243 // if (nb_input_files == 0) {
4244 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4248 current_time = ti = getutime();
4249 if (transcode() < 0)
4251 ti = getutime() - ti;
/* -benchmark output (enabling condition is on an elided line) */
4253 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4255 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4256 decode_error_stat[0], decode_error_stat[1]);
/* fail hard when the decode error ratio exceeds -max_error_rate */
4257 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4260 exit_program(received_nb_signals ? 255 : main_return_code);
4261 return main_return_code;