2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity strings consumed by the cmdutils banner/version code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Handle for the -vstats log file; opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions.
 * NOTE(review): the initializer list is truncated in this extract. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Counters maintained by the video sync code (duplicated/dropped frames). */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated on first use in
 * do_subtitle_out() and freed in ffmpeg_cleanup(). */
135 static uint8_t *subtitle_out;
/* Global (non-static) stream/file tables — presumably populated by the
 * option-parsing code in another translation unit; freed in ffmpeg_cleanup(). */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
/* Nonzero once oldtty holds terminal settings that term_exit must restore. */
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a freshly allocated, fully transparent
 * RGB32 canvas. Dimensions come from the decoder when known, otherwise the
 * pre-configured sub2video size. Returns <0 on allocation failure.
 * NOTE(review): this extract is missing the declaration of `ret`, the
 * failure-return statement and the closing of the function. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment for the new frame data. */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero-fill == transparent black in RGB32. */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas `dst`,
 * expanding the 8-bit palette indices through the rect's palette. Rects that
 * are not bitmaps or fall outside the w x h canvas are rejected with a
 * warning. NOTE(review): several lines (declarations, early returns, loop
 * closings) are missing from this extract. */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left pixel; 4 bytes per RGB32 pixel. */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
/* Palette lookup: one RGB32 word per source index byte. */
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Stamp the current sub2video canvas with `pts` and push a kept reference of
 * it into every filter this input stream feeds, flushing the filtergraph
 * (AV_BUFFERSRC_FLAG_PUSH) so consumers see it immediately. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render the subtitle `sub` onto a fresh blank canvas and push it into the
 * filtergraphs. Display start/end times are rescaled from AV_TIME_BASE to the
 * stream time base. When `sub` is NULL (heartbeat/flush path) the previous
 * end_pts is reused as the new pts. NOTE(review): lines handling the
 * sub == NULL branch and some declarations are missing from this extract. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* start/end_display_time are in ms; *1000 converts to AV_TIME_BASE (us). */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to sibling subtitle streams of the
 * same input file so that filters (e.g. overlay) waiting on a subtitle input
 * do not stall video frames in the graph. NOTE(review): several lines
 * (declarations, `continue`s, the nb_reqs condition) are missing from this
 * extract. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Canvas expired or never drawn: refresh it before re-pushing. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: push a final refreshed
 * canvas if one is still pending, then send NULL (EOF) to every buffersrc
 * this stream feeds. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal teardown: restore the saved tty settings.
 * NOTE(review): the guard around tcsetattr (presumably `if (restore_tty)`)
 * is missing from this extract — confirm against the full source. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination bookkeeping, written from signal handlers — hence
 * volatile. NOTE(review): these are plain volatile ints, not atomics;
 * adequate for the flag-style use here but not general synchronization. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many times; after more
 * than three signals, hard-exit via a direct write to stderr (only
 * async-signal-safe calls are used here). NOTE(review): the return type
 * line, term_exit call and closing brace are missing from this extract. */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler. For close/logoff/shutdown the process is killed as soon
 * as this callback returns, so it spins until the main thread has cleaned
 * up (ffmpeg_exited). NOTE(review): switch header, returns and `#endif`
 * are missing from this extract. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Interior of term_init() — the function's signature line is missing from
 * this extract. Puts the controlling terminal into raw-ish mode (no echo,
 * no canonical input) so single keypresses can be read, then installs
 * signal handlers for graceful shutdown. */
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU-time resource limit exceeded — treat like a termination request. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Poll stdin for a single keypress: select() on POSIX terminals,
 * PeekNamedPipe/console APIs on Windows. NOTE(review): most of the body
 * (fd_set setup, the actual read, return paths, #ifdef closings) is missing
 * from this extract. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles -> stdin is a pipe. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
/* Interrupt callback handed to libavformat/libavio so long-running I/O can
 * be aborted by user signals (see decode_interrupt_cb above). */
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered to run at program exit: frees filtergraphs,
 * output files/streams, input files/streams and associated codec contexts,
 * then reports the exit reason. `ret` is the pending exit status.
 * NOTE(review): numerous lines (loop/brace closings, declarations, guards)
 * are missing from this extract. */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- free filtergraphs, including per-edge names ------------------------ */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- close output files ------------------------------------------------- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
/* Only close the underlying file when the muxer actually owns one. */
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- free per-output-stream state --------------------------------------- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
/* Walk and close the bitstream-filter chain. */
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 avcodec_free_context(&ost->enc_ctx);
532 av_freep(&output_streams[i]);
535 free_input_threads();
/* --- close input files and per-input-stream state ------------------------ */
537 for (i = 0; i < nb_input_files; i++) {
538 avformat_close_input(&input_files[i]->ctx);
539 av_freep(&input_files[i]);
541 for (i = 0; i < nb_input_streams; i++) {
542 InputStream *ist = input_streams[i];
544 av_frame_free(&ist->decoded_frame);
545 av_frame_free(&ist->filter_frame);
546 av_dict_free(&ist->decoder_opts);
547 avsubtitle_free(&ist->prev_sub.subtitle);
548 av_frame_free(&ist->sub2video.frame);
549 av_freep(&ist->filters);
550 av_freep(&ist->hwaccel_device);
552 avcodec_free_context(&ist->dec_ctx);
554 av_freep(&input_streams[i]);
559 av_freep(&vstats_filename);
/* Free the global tables themselves (entries freed above). */
561 av_freep(&input_streams);
562 av_freep(&input_files);
563 av_freep(&output_streams);
564 av_freep(&output_files);
568 avformat_network_deinit();
/* --- final status report ------------------------------------------------- */
570 if (received_sigterm) {
571 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
572 (int) received_sigterm);
573 } else if (ret && transcode_init_done) {
574 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort the program if any user-supplied option in `m` was never consumed:
 * a leftover entry means the user passed an option no component recognized.
 * NOTE(review): the fatal-exit statement after the log call (presumably
 * exit_program(1)) and the braces are missing from this extract. */
589 void assert_avoptions(AVDictionary *m)
591 AVDictionaryEntry *t;
592 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
593 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-error path for selecting an experimental codec without the required
 * opt-in; `encoder` distinguishes encode vs decode direction.
 * NOTE(review): the function body is missing from this extract. */
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time consumed since the previous call,
 * labelled by the printf-style `fmt` arguments; a NULL fmt only resets the
 * reference time. NOTE(review): the NULL-fmt branch, va_start/va_end, buffer
 * declaration and current_time update are missing from this extract. */
603 static void update_benchmark(const char *fmt, ...)
605 if (do_benchmark_all) {
606 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
614 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded packet to the muxer for output stream `ost`: copies
 * extradata to the muxing context on first use, enforces -frames limits,
 * extracts encoder quality/error side data for stats, applies bitstream
 * filters, sanitizes/monotonizes DTS, then interleave-writes the packet.
 * Takes ownership of `pkt` (always unreferenced before returning).
 * NOTE(review): many lines (guards, closing braces, exit paths) are missing
 * from this extract. */
629 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
631 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
632 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Lazily mirror encoder extradata into the muxing codec context. */
635 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
636 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
637 if (ost->st->codec->extradata) {
638 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
639 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: strip timestamps entirely. */
643 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648 * Audio encoders may split the packets -- #frames in != #packets out.
649 * But there is no reordering, so we can limit the number of output packets
650 * by simply dropping them here.
651 * Counting encoded video frames needs to be done separately because of
652 * reordering, see do_video_out()
654 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
655 if (ost->frame_number >= ost->max_frames) {
656 av_packet_unref(pkt);
/* Pull quality/PSNR stats the encoder attached as packet side data. */
661 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
663 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665 ost->quality = sd ? AV_RL32(sd) : -1;
666 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force packet duration to exactly one frame interval. */
675 if (ost->frame_rate.num && ost->is_cfr) {
676 if (pkt->duration > 0)
677 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
678 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
684 av_packet_split_side_data(pkt);
686 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
687 print_error("", ret);
/* --- timestamp sanitation for muxers that require timestamps ------------ */
692 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
693 if (pkt->dts != AV_NOPTS_VALUE &&
694 pkt->pts != AV_NOPTS_VALUE &&
695 pkt->dts > pkt->pts) {
696 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
698 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}:
 * sum minus min minus max leaves the middle value. */
700 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
701 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
702 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
705 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
706 pkt->dts != AV_NOPTS_VALUE &&
707 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict muxers need strictly increasing dts; non-strict allow equality. */
708 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
709 if (pkt->dts < max) {
710 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
711 av_log(s, loglevel, "Non-monotonous DTS in output stream "
712 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
713 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
715 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
718 av_log(s, loglevel, "changing to %"PRId64". This may result "
719 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
721 if(pkt->pts >= pkt->dts)
722 pkt->pts = FFMAX(pkt->pts, max);
727 ost->last_mux_dts = pkt->dts;
729 ost->data_size += pkt->size;
730 ost->packets_written++;
732 pkt->stream_index = ost->index;
/* -debug_ts style trace of what goes into the muxer. */
735 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
736 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
737 av_get_media_type_string(ost->enc_ctx->codec_type),
738 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
739 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
744 ret = av_interleaved_write_frame(s, pkt);
746 print_error("av_interleaved_write_frame()", ret);
747 main_return_code = 1;
748 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
750 av_packet_unref(pkt);
/* Mark `ost` as done encoding; shortens the owning file's recording time to
 * the stream's current end so other streams stop at the same point.
 * NOTE(review): the guard around the recording_time update (presumably
 * `if (of->shortest)`) and braces are missing from this extract. */
753 static void close_output_stream(OutputStream *ost)
755 OutputFile *of = output_files[ost->file_index];
757 ost->finished |= ENCODER_FINISHED;
759 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
760 of->recording_time = FFMIN(of->recording_time, end);
764 static int check_recording_time(OutputStream *ost)
766 OutputFile *of = output_files[ost->file_index];
768 if (of->recording_time != INT64_MAX &&
769 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
770 AV_TIME_BASE_Q) >= 0) {
771 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet to write_frame().
 * Fills in missing frame pts from the running sync_opts counter and advances
 * it by nb_samples. NOTE(review): declarations (pkt, got_packet), several
 * guards, the got_packet branch and closing braces are missing from this
 * extract. */
777 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
780 AVCodecContext *enc = ost->enc_ctx;
784 av_init_packet(&pkt);
/* Bail out (stream closed) once the -t limit is reached. */
788 if (!check_recording_time(ost))
791 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
792 frame->pts = ost->sync_opts;
793 ost->sync_opts = frame->pts + frame->nb_samples;
794 ost->samples_encoded += frame->nb_samples;
795 ost->frames_encoded++;
797 av_assert0(pkt.size || !pkt.data);
798 update_benchmark(NULL);
/* -debug_ts trace of what goes into the encoder. */
800 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
801 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
802 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
803 enc->time_base.num, enc->time_base.den);
806 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
807 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
810 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Convert packet timestamps from encoder to muxer time base. */
813 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
816 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
817 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
818 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
819 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
822 write_frame(s, &pkt, ost);
/* Encode a subtitle and mux the result. DVB subtitles are emitted twice
 * (draw + clear packet), others once; timestamps are normalized so
 * start_display_time is folded into pts. NOTE(review): parameter list
 * continuation, several declarations/guards and closing braces are missing
 * from this extract. */
826 static void do_subtitle_out(AVFormatContext *s,
831 int subtitle_out_max_size = 1024 * 1024;
832 int subtitle_out_size, nb, i;
837 if (sub->pts == AV_NOPTS_VALUE) {
838 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazy one-time allocation of the shared encode buffer. */
847 subtitle_out = av_malloc(subtitle_out_max_size);
849 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
854 /* Note: DVB subtitle need one packet to draw them and one other
855 packet to clear them */
856 /* XXX: signal it in the codec context ? */
857 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
862 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
864 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
865 pts -= output_files[ost->file_index]->start_time;
866 for (i = 0; i < nb; i++) {
867 unsigned save_num_rects = sub->num_rects;
869 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
870 if (!check_recording_time(ost))
874 // start_display_time is required to be 0
875 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
876 sub->end_display_time -= sub->start_display_time;
877 sub->start_display_time = 0;
881 ost->frames_encoded++;
883 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
884 subtitle_out_max_size, sub);
/* Restore num_rects — the encoder call may have clobbered it. */
886 sub->num_rects = save_num_rects;
887 if (subtitle_out_size < 0) {
888 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
892 av_init_packet(&pkt);
893 pkt.data = subtitle_out;
894 pkt.size = subtitle_out_size;
895 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
896 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
897 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
898 /* XXX: the pts correction is handled here. Maybe handling
899 it in the codec would be better */
/* 90 = 90kHz MPEG clock ticks per millisecond of display time. */
901 pkt.pts += 90 * sub->start_display_time;
903 pkt.pts += 90 * sub->end_display_time;
906 write_frame(s, &pkt, ost);
/* Core video output path: decides, per input picture, how many frames to
 * emit (dup/drop according to the active -vsync mode), handles forced
 * keyframes, encodes via avcodec_encode_video2 (or raw passthrough), and
 * keeps a reference to the last frame for duplication. NOTE(review): this
 * extract is missing many lines — parameter list continuation, several
 * declarations, case labels, braces and goto/early-exit paths. */
910 static void do_video_out(AVFormatContext *s,
912 AVFrame *next_picture,
915 int ret, format_video_sync;
917 AVCodecContext *enc = ost->enc_ctx;
918 AVCodecContext *mux_enc = ost->st->codec;
919 int nb_frames, nb0_frames, i;
920 double delta, delta0;
923 InputStream *ist = NULL;
924 AVFilterContext *filter = ost->filter->filter;
926 if (ost->source_index >= 0)
927 ist = input_streams[ost->source_index];
/* --- derive the expected frame duration in encoder time-base units ------ */
929 if (filter->inputs[0]->frame_rate.num > 0 &&
930 filter->inputs[0]->frame_rate.den > 0)
931 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
933 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
934 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
936 if (!ost->filters_script &&
940 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
941 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path: predict dup count from the recent history (median of 3). */
946 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
947 ost->last_nb0_frames[1],
948 ost->last_nb0_frames[2]);
950 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
951 delta = delta0 + duration;
953 /* by default, we output a single frame */
954 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* --- resolve -vsync auto into a concrete sync mode ---------------------- */
957 format_video_sync = video_sync_method;
958 if (format_video_sync == VSYNC_AUTO) {
959 if(!strcmp(s->oformat->name, "avi")) {
960 format_video_sync = VSYNC_VFR;
962 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
964 && format_video_sync == VSYNC_CFR
965 && input_files[ist->file_index]->ctx->nb_streams == 1
966 && input_files[ist->file_index]->input_ts_offset == 0) {
967 format_video_sync = VSYNC_VSCFR;
969 if (format_video_sync == VSYNC_CFR && copy_ts) {
970 format_video_sync = VSYNC_VSCFR;
973 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip frames that would land in the past (negative drift). */
977 format_video_sync != VSYNC_PASSTHROUGH &&
978 format_video_sync != VSYNC_DROP) {
980 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
982 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
983 sync_ipts = ost->sync_opts;
/* --- per-mode dup/drop decision ----------------------------------------- */
988 switch (format_video_sync) {
990 if (ost->frame_number == 0 && delta0 >= 0.5) {
991 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
994 ost->sync_opts = lrint(sync_ipts);
997 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
998 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1000 } else if (delta < -1.1)
1002 else if (delta > 1.1) {
1003 nb_frames = lrintf(delta);
1005 nb0_frames = lrintf(delta0 - 0.6);
1011 else if (delta > 0.6)
1012 ost->sync_opts = lrint(sync_ipts);
1015 case VSYNC_PASSTHROUGH:
1016 ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and record the dup count in the history window. */
1023 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1024 nb0_frames = FFMIN(nb0_frames, nb_frames);
1026 memmove(ost->last_nb0_frames + 1,
1027 ost->last_nb0_frames,
1028 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1029 ost->last_nb0_frames[0] = nb0_frames;
1031 if (nb0_frames == 0 && ost->last_dropped) {
1033 av_log(NULL, AV_LOG_VERBOSE,
1034 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1035 ost->frame_number, ost->st->index, ost->last_frame->pts);
1037 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1038 if (nb_frames > dts_error_threshold * 30) {
1039 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1043 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1044 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1046 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1048 /* duplicates frame if needed */
1049 for (i = 0; i < nb_frames; i++) {
1050 AVFrame *in_picture;
1051 av_init_packet(&pkt);
/* Re-encode the previous frame when filling a dup slot. */
1055 if (i < nb0_frames && ost->last_frame) {
1056 in_picture = ost->last_frame;
1058 in_picture = next_picture;
1063 in_picture->pts = ost->sync_opts;
1066 if (!check_recording_time(ost))
1068 if (ost->frame_number >= ost->max_frames)
1072 #if FF_API_LAVF_FMT_RAWPICTURE
/* Legacy raw passthrough: ship the AVPicture pointer itself as packet data. */
1073 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1074 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1075 /* raw pictures are written as AVPicture structure to
1076 avoid any copies. We support temporarily the older
1078 if (in_picture->interlaced_frame)
1079 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1081 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1082 pkt.data = (uint8_t *)in_picture;
1083 pkt.size = sizeof(AVPicture);
1084 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1085 pkt.flags |= AV_PKT_FLAG_KEY;
1087 write_frame(s, &pkt, ost);
1091 int got_packet, forced_keyframe = 0;
1094 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1095 ost->top_field_first >= 0)
1096 in_picture->top_field_first = !!ost->top_field_first;
1098 if (in_picture->interlaced_frame) {
1099 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1100 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1102 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1104 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1106 in_picture->quality = enc->global_quality;
1107 in_picture->pict_type = 0;
/* --- forced keyframe decision: -force_key_frames list/expr/"source" ----- */
1109 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1110 in_picture->pts * av_q2d(enc->time_base) : NAN;
1111 if (ost->forced_kf_index < ost->forced_kf_count &&
1112 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1113 ost->forced_kf_index++;
1114 forced_keyframe = 1;
1115 } else if (ost->forced_keyframes_pexpr) {
1117 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1118 res = av_expr_eval(ost->forced_keyframes_pexpr,
1119 ost->forced_keyframes_expr_const_values, NULL);
1120 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1121 ost->forced_keyframes_expr_const_values[FKF_N],
1122 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1123 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1124 ost->forced_keyframes_expr_const_values[FKF_T],
1125 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1128 forced_keyframe = 1;
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1130 ost->forced_keyframes_expr_const_values[FKF_N];
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1132 ost->forced_keyframes_expr_const_values[FKF_T];
1133 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1136 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1137 } else if ( ost->forced_keyframes
1138 && !strncmp(ost->forced_keyframes, "source", 6)
1139 && in_picture->key_frame==1) {
1140 forced_keyframe = 1;
1143 if (forced_keyframe) {
1144 in_picture->pict_type = AV_PICTURE_TYPE_I;
1145 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1148 update_benchmark(NULL);
1150 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1151 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1152 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1153 enc->time_base.num, enc->time_base.den);
1156 ost->frames_encoded++;
1158 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1159 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1161 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1167 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1168 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1169 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1170 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1173 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1174 pkt.pts = ost->sync_opts;
1176 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1179 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1180 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1181 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1182 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1185 frame_size = pkt.size;
1186 write_frame(s, &pkt, ost);
1188 /* if two pass, output log */
1189 if (ost->logfile && enc->stats_out) {
1190 fprintf(ost->logfile, "%s", enc->stats_out);
1196 * For video, number of frames in == number of packets out.
1197 * But there may be reordering, so we can't throw away frames on encoder
1198 * flush, we need to limit them here, before they go into encoder.
1200 ost->frame_number++;
1202 if (vstats_filename && frame_size)
1203 do_video_stats(ost, frame_size);
/* Keep a reference to this picture for future dup slots. */
1206 if (!ost->last_frame)
1207 ost->last_frame = av_frame_alloc();
1208 av_frame_unref(ost->last_frame);
1209 if (next_picture && ost->last_frame)
1210 av_frame_ref(ost->last_frame, next_picture);
1212 av_frame_free(&ost->last_frame);
1215 static double psnr(double d)
1217 return -10.0 * log10(d);
/* Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * frame size, instantaneous and average bitrate, picture type) to the
 * -vstats file, opening it on first call. NOTE(review): declarations
 * (frame_number), the fopen-failure path and enc assignment are missing
 * from this extract. */
1220 static void do_video_stats(OutputStream *ost, int frame_size)
1222 AVCodecContext *enc;
1224 double ti1, bitrate, avg_bitrate;
1226 /* this is executed just the first time do_video_stats is called */
1228 vstats_file = fopen(vstats_filename, "w");
1236 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1237 frame_number = ost->st->nb_frames;
1238 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1239 ost->quality / (float)FF_QP2LAMBDA);
1241 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
/* Normalize the accumulated luma error by pixel count and 255^2 peak. */
1242 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1244 fprintf(vstats_file,"f_size= %6d ", frame_size);
1245 /* compute pts value */
1246 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1250 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1251 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1252 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1253 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1254 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark `ost` fully finished (encoder and muxer); the loop over the file's
 * other streams appears conditional in the full source — the guard line is
 * missing from this extract. */
1258 static void finish_output_stream(OutputStream *ost)
1260 OutputFile *of = output_files[ost->file_index];
1263 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1266 for (i = 0; i < of->ctx->nb_streams; i++)
1267 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1272 * Get and encode new output from any of the filtergraphs, without causing
1275 * @return 0 for success, <0 for severe errors
/* NOTE(review): gappy excerpt; comments describe the visible lines only. */
1277 static int reap_filters(int flush)
1279 AVFrame *filtered_frame = NULL;
1282 /* Reap all buffers present in the buffer sinks */
1283 for (i = 0; i < nb_output_streams; i++) {
1284 OutputStream *ost = output_streams[i];
1285 OutputFile *of = output_files[ost->file_index];
1286 AVFilterContext *filter;
1287 AVCodecContext *enc = ost->enc_ctx;
1292 filter = ost->filter->filter;
/* Lazily allocate the reusable frame that receives filter output. */
1294 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1295 return AVERROR(ENOMEM);
1297 filtered_frame = ost->filtered_frame;
1300 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only drain frames already buffered in the sink; do not
 * pull new data through the graph. */
1301 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1302 AV_BUFFERSINK_FLAG_NO_REQUEST);
1304 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1305 av_log(NULL, AV_LOG_WARNING,
1306 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On EOF during a flush, push a NULL frame so do_video_out can drain. */
1307 } else if (flush && ret == AVERROR_EOF) {
1308 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1309 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1313 if (ost->finished) {
1314 av_frame_unref(filtered_frame);
1317 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1318 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1319 AVRational tb = enc->time_base;
/* Temporarily scale the time base up by up to 2^16 so the rescale keeps
 * extra fractional precision, then divide it back out into float_pts. */
1320 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1322 tb.den <<= extra_bits;
1324 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1325 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1326 float_pts /= 1 << extra_bits;
1327 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1328 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1330 filtered_frame->pts =
1331 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1332 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1334 //if (ost->source_index >= 0)
1335 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* Dispatch the frame to the matching per-media-type output path. */
1337 switch (filter->inputs[0]->type) {
1338 case AVMEDIA_TYPE_VIDEO:
1339 if (!ost->frame_aspect_ratio.num)
1340 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1343 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1344 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1346 enc->time_base.num, enc->time_base.den);
1349 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1351 case AVMEDIA_TYPE_AUDIO:
/* Channel-count changes mid-stream are only valid if the encoder
 * advertises PARAM_CHANGE support. */
1352 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1353 enc->channels != av_frame_get_channels(filtered_frame)) {
1354 av_log(NULL, AV_LOG_ERROR,
1355 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1358 do_audio_out(of->ctx, ost, filtered_frame);
1361 // TODO support subtitle filters
1365 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type output sizes and muxing
 * overhead at INFO level, then detailed per-stream demux/decode and
 * encode/mux counters at VERBOSE level.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
1372 static void print_final_stats(int64_t total_size)
1374 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1375 uint64_t subtitle_size = 0;
1376 uint64_t data_size = 0;
1377 float percent = -1.0;
/* Accumulate bytes written per media type across all output streams. */
1381 for (i = 0; i < nb_output_streams; i++) {
1382 OutputStream *ost = output_streams[i];
1383 switch (ost->enc_ctx->codec_type) {
1384 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1385 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1386 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1387 default: other_size += ost->data_size; break;
1389 extra_size += ost->enc_ctx->extradata_size;
1390 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 macro in one mask — looks like it should be
 * AV_CODEC_FLAG_PASS2; verify the two macros share the same value. */
1391 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1392 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead as a percentage of the raw encoded payload. */
1396 if (data_size && total_size>0 && total_size >= data_size)
1397 percent = 100.0 * (total_size - data_size) / data_size;
1399 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1400 video_size / 1024.0,
1401 audio_size / 1024.0,
1402 subtitle_size / 1024.0,
1403 other_size / 1024.0,
1404 extra_size / 1024.0);
1406 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1408 av_log(NULL, AV_LOG_INFO, "unknown");
1409 av_log(NULL, AV_LOG_INFO, "\n");
1411 /* print verbose per-stream stats */
1412 for (i = 0; i < nb_input_files; i++) {
1413 InputFile *f = input_files[i];
1414 uint64_t total_packets = 0, total_size = 0;
1416 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1417 i, f->ctx->filename);
1419 for (j = 0; j < f->nb_streams; j++) {
1420 InputStream *ist = input_streams[f->ist_index + j];
1421 enum AVMediaType type = ist->dec_ctx->codec_type;
1423 total_size += ist->data_size;
1424 total_packets += ist->nb_packets;
1426 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1427 i, j, media_type_string(type));
1428 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1429 ist->nb_packets, ist->data_size);
1431 if (ist->decoding_needed) {
1432 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1433 ist->frames_decoded);
1434 if (type == AVMEDIA_TYPE_AUDIO)
1435 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1436 av_log(NULL, AV_LOG_VERBOSE, "; ");
1439 av_log(NULL, AV_LOG_VERBOSE, "\n");
1442 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1443 total_packets, total_size);
/* Same per-stream breakdown for the output side. */
1446 for (i = 0; i < nb_output_files; i++) {
1447 OutputFile *of = output_files[i];
1448 uint64_t total_packets = 0, total_size = 0;
1450 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1451 i, of->ctx->filename);
1453 for (j = 0; j < of->ctx->nb_streams; j++) {
1454 OutputStream *ost = output_streams[of->ost_index + j];
1455 enum AVMediaType type = ost->enc_ctx->codec_type;
1457 total_size += ost->data_size;
1458 total_packets += ost->packets_written;
1460 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1461 i, j, media_type_string(type));
1462 if (ost->encoding_needed) {
1463 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1464 ost->frames_encoded);
1465 if (type == AVMEDIA_TYPE_AUDIO)
1466 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1467 av_log(NULL, AV_LOG_VERBOSE, "; ");
1470 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1471 ost->packets_written, ost->data_size);
1473 av_log(NULL, AV_LOG_VERBOSE, "\n");
1476 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1477 total_packets, total_size);
/* Warn the user when absolutely nothing was encoded or copied. */
1479 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1480 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1482 av_log(NULL, AV_LOG_WARNING, "\n");
1484 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic (and final) progress report: builds a one-line status string
 * (frame count, fps, q, size, time, bitrate, dup/drop, speed) for the
 * console and a key=value script for -progress output. Rate-limited to
 * one update per 500ms unless this is the last report.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
1489 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1492 AVBPrint buf_script;
1494 AVFormatContext *oc;
1496 AVCodecContext *enc;
1497 int frame_number, vid, i;
1500 int64_t pts = INT64_MIN + 1;
/* Static state: remembers the previous report time for rate limiting. */
1501 static int64_t last_time = -1;
1502 static int qp_histogram[52];
1503 int hours, mins, secs, us;
1506 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle: skip intermediate reports closer than 500ms apart. */
1509 if (!is_last_report) {
1510 if (last_time == -1) {
1511 last_time = cur_time;
1514 if ((cur_time - last_time) < 500000)
1516 last_time = cur_time;
1519 t = (cur_time-timer_start) / 1000000.0;
1522 oc = output_files[0]->ctx;
1524 total_size = avio_size(oc->pb);
1525 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1526 total_size = avio_tell(oc->pb);
1530 av_bprint_init(&buf_script, 0, 1);
1531 for (i = 0; i < nb_output_streams; i++) {
1533 ost = output_streams[i];
1535 if (!ost->stream_copy)
1536 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream was already reported; only the
 * first video stream gets the full frame/fps line. */
1538 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1539 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1540 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1541 ost->file_index, ost->index, q);
1543 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546 frame_number = ost->frame_number;
1547 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" yields 1 extra decimal for slow encodes, 0 otherwise. */
1548 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1549 frame_number, fps < 9.95, fps, q);
1550 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1551 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1552 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1553 ost->file_index, ost->index, q);
1555 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* QP histogram: one hex digit per bucket, log2-compressed counts. */
1559 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1561 for (j = 0; j < 32; j++)
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
/* Per-plane PSNR (Y/U/V) plus combined, when the encoder collects it. */
1565 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1567 double error, error_sum = 0;
1568 double scale, scale_sum = 0;
1570 char type[3] = { 'Y','U','V' };
1571 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1572 for (j = 0; j < 3; j++) {
1573 if (is_last_report) {
1574 error = enc->error[j];
1575 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1577 error = ost->error[j];
1578 scale = enc->width * enc->height * 255.0 * 255.0;
1584 p = psnr(error / scale);
1585 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* "| 32" lowercases the plane letter for the script key. */
1586 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1587 ost->file_index, ost->index, type[j] | 32, p);
1589 p = psnr(error_sum / scale_sum);
1590 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1591 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1592 ost->file_index, ost->index, p);
1596 /* compute min output value */
1597 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1598 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1599 ost->st->time_base, AV_TIME_BASE_Q));
1601 nb_frames_drop += ost->last_dropped;
/* Split the max end pts into h:m:s.us for display. */
1604 secs = FFABS(pts) / AV_TIME_BASE;
1605 us = FFABS(pts) % AV_TIME_BASE;
1611 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1612 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1614 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1616 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1617 "size=%8.0fkB time=", total_size / 1024.0);
1619 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1620 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1621 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1622 (100 * us) / AV_TIME_BASE);
1625 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1626 av_bprintf(&buf_script, "bitrate=N/A\n");
1628 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1629 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1632 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1633 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1634 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1635 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1636 hours, mins, secs, us);
1638 if (nb_frames_dup || nb_frames_drop)
1639 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1640 nb_frames_dup, nb_frames_drop);
1641 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1642 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1645 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1646 av_bprintf(&buf_script, "speed=N/A\n");
1648 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1649 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' on the last. */
1652 if (print_stats || is_last_report) {
1653 const char end = is_last_report ? '\n' : '\r';
1654 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1655 fprintf(stderr, "%s %c", buf, end);
1657 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* -progress sink: write the key=value block and flush it immediately. */
1662 if (progress_avio) {
1663 av_bprintf(&buf_script, "progress=%s\n",
1664 is_last_report ? "end" : "continue");
1665 avio_write(progress_avio, buf_script.str,
1666 FFMIN(buf_script.len, buf_script.size - 1));
1667 avio_flush(progress_avio);
1668 av_bprint_finalize(&buf_script, NULL);
1669 if (is_last_report) {
1670 avio_closep(&progress_avio);
1675 print_final_stats(total_size);
/* Drain every active encoder at end of stream: feed each one NULL frames
 * until it stops producing packets, writing each flushed packet to the
 * muxer. Streams not being encoded (stream copy) are skipped.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
1678 static void flush_encoders(void)
1682 for (i = 0; i < nb_output_streams; i++) {
1683 OutputStream *ost = output_streams[i];
1684 AVCodecContext *enc = ost->enc_ctx;
1685 AVFormatContext *os = output_files[ost->file_index]->ctx;
1686 int stop_encoding = 0;
1688 if (!ost->encoding_needed)
/* Raw/PCM-style audio (frame_size <= 1) buffers nothing — no flush. */
1691 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1693 #if FF_API_LAVF_FMT_RAWPICTURE
1694 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Select the media-specific encode entry point via function pointer. */
1699 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1702 switch (enc->codec_type) {
1703 case AVMEDIA_TYPE_AUDIO:
1704 encode = avcodec_encode_audio2;
1707 case AVMEDIA_TYPE_VIDEO:
1708 encode = avcodec_encode_video2;
1719 av_init_packet(&pkt);
1723 update_benchmark(NULL);
/* NULL frame = flush request; loop until got_packet stays 0. */
1724 ret = encode(enc, &pkt, NULL, &got_packet);
1725 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1727 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass log file: append encoder stats emitted during the flush. */
1732 if (ost->logfile && enc->stats_out) {
1733 fprintf(ost->logfile, "%s", enc->stats_out);
1739 if (ost->finished & MUXER_FINISHED) {
1740 av_packet_unref(&pkt);
1743 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1744 pkt_size = pkt.size;
1745 write_frame(os, &pkt, ost);
1746 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1747 do_video_stats(ost, pkt_size);
1758 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero only when ost is fed from this exact input stream and
 * the packet is not before the output file's requested start time. */
1760 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1762 OutputFile *of = output_files[ost->file_index];
/* Global index of ist across all input files, to compare with the
 * stream-mapping source index stored on the output stream. */
1763 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1765 if (ost->source_index != ist_index)
1771 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet straight to an output stream without re-encoding:
 * rescales timestamps into the output time base, honours -ss/-t limits,
 * and runs the legacy parser/raw-picture fix-ups before muxing.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
1777 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1779 OutputFile *of = output_files[ost->file_index];
1780 InputFile *f = input_files [ist->file_index];
1781 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1782 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1786 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1788 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1789 !ost->copy_initial_nonkeyframes)
/* Drop packets before the effective start point (accounting for copy_ts). */
1792 if (!ost->frame_number && !ost->copy_prior_start) {
1793 int64_t comp_start = start_time;
1794 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1795 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1796 if (pkt->pts == AV_NOPTS_VALUE ?
1797 ist->pts < comp_start :
1798 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Past the -t recording window: finish this output stream. */
1802 if (of->recording_time != INT64_MAX &&
1803 ist->pts >= of->recording_time + start_time) {
1804 close_output_stream(ost);
1808 if (f->recording_time != INT64_MAX) {
1809 start_time = f->ctx->start_time;
1810 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1811 start_time += f->start_time;
1812 if (ist->pts >= f->recording_time + start_time) {
1813 close_output_stream(ost);
1818 /* force the input stream PTS */
1819 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts from input to output time base, shifted by start time. */
1822 if (pkt->pts != AV_NOPTS_VALUE)
1823 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1825 opkt.pts = AV_NOPTS_VALUE;
1827 if (pkt->dts == AV_NOPTS_VALUE)
1828 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1830 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1831 opkt.dts -= ost_tb_start_time;
/* Audio: rescale via sample count to avoid accumulating rounding error. */
1833 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1834 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1836 duration = ist->dec_ctx->frame_size;
1837 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1838 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1839 ost->st->time_base) - ost_tb_start_time;
1842 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1843 opkt.flags = pkt->flags;
1844 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1845 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1846 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1847 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1848 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1850 int ret = av_parser_change(ost->parser, ost->st->codec,
1851 &opkt.data, &opkt.size,
1852 pkt->data, pkt->size,
1853 pkt->flags & AV_PKT_FLAG_KEY);
1855 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap the (possibly parser-allocated) payload in a refcounted buffer. */
1860 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1865 opkt.data = pkt->data;
1866 opkt.size = pkt->size;
1868 av_copy_packet_side_data(&opkt, pkt);
1870 #if FF_API_LAVF_FMT_RAWPICTURE
1871 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1872 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1873 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1874 /* store AVPicture in AVPacket, as expected by the output format */
1875 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1877 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1881 opkt.data = (uint8_t *)&pict;
1882 opkt.size = sizeof(AVPicture);
1883 opkt.flags |= AV_PKT_FLAG_KEY;
1887 write_frame(of->ctx, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * Returns nonzero on success, 0 when no layout could be guessed
 * (gappy excerpt — the return statements are not visible here). */
1890 int guess_input_channel_layout(InputStream *ist)
1892 AVCodecContext *dec = ist->dec_ctx;
1894 if (!dec->channel_layout) {
1895 char layout_name[256];
/* Refuse to guess for channel counts above the user-set ceiling. */
1897 if (dec->channels > ist->guess_layout_max)
1899 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1900 if (!dec->channel_layout)
1902 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1903 dec->channels, dec->channel_layout);
1904 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1905 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Record decode success/failure statistics and, with -xerror, abort on
 * decode errors or on corrupt decoded frames. */
1910 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
1912 if (*got_output || ret<0)
1913 decode_error_stat[ret<0] ++;
1915 if (ret < 0 && exit_on_error)
/* Corrupt-but-decoded frames also abort under -xerror. */
1918 if (exit_on_error && *got_output && ist) {
1919 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1920 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, track timestamps, reconfigure filtergraphs on
 * format changes, and push the decoded frame into every attached filter.
 * Returns the decoder's return value, or a negative error from the
 * filter-injection path.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
1926 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1928 AVFrame *decoded_frame, *f;
1929 AVCodecContext *avctx = ist->dec_ctx;
1930 int i, ret, err = 0, resample_changed;
1931 AVRational decoded_frame_tb;
/* Lazily allocate the two reusable frames kept on the input stream. */
1933 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1934 return AVERROR(ENOMEM);
1935 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1936 return AVERROR(ENOMEM);
1937 decoded_frame = ist->decoded_frame;
1939 update_benchmark(NULL);
1940 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1941 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1943 if (ret >= 0 && avctx->sample_rate <= 0) {
1944 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1945 ret = AVERROR_INVALIDDATA;
1948 check_decode_result(ist, got_output, ret);
1950 if (!*got_output || ret < 0)
1953 ist->samples_decoded += decoded_frame->nb_samples;
1954 ist->frames_decoded++;
1957 /* increment next_dts to use for the case where the input stream does not
1958 have timestamps or there are multiple frames in the packet */
1959 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1961 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in sample format/rate/channels/layout since the
 * last frame; a change forces filtergraph reconfiguration below. */
1965 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1966 ist->resample_channels != avctx->channels ||
1967 ist->resample_channel_layout != decoded_frame->channel_layout ||
1968 ist->resample_sample_rate != decoded_frame->sample_rate;
1969 if (resample_changed) {
1970 char layout1[64], layout2[64];
1972 if (!guess_input_channel_layout(ist)) {
1973 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1974 "layout for Input Stream #%d.%d\n", ist->file_index,
1978 decoded_frame->channel_layout = avctx->channel_layout;
1980 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1981 ist->resample_channel_layout);
1982 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1983 decoded_frame->channel_layout);
1985 av_log(NULL, AV_LOG_INFO,
1986 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1987 ist->file_index, ist->st->index,
1988 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1989 ist->resample_channels, layout1,
1990 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1991 avctx->channels, layout2);
/* Remember the new parameters as the current baseline. */
1993 ist->resample_sample_fmt = decoded_frame->format;
1994 ist->resample_sample_rate = decoded_frame->sample_rate;
1995 ist->resample_channel_layout = decoded_frame->channel_layout;
1996 ist->resample_channels = avctx->channels;
1998 for (i = 0; i < nb_filtergraphs; i++)
1999 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2000 FilterGraph *fg = filtergraphs[i];
2001 if (configure_filtergraph(fg) < 0) {
2002 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2008 /* if the decoder provides a pts, use it instead of the last packet pts.
2009 the decoder could be delaying output by a packet or more. */
2010 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2011 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2012 decoded_frame_tb = avctx->time_base;
2013 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2014 decoded_frame->pts = decoded_frame->pkt_pts;
2015 decoded_frame_tb = ist->st->time_base;
2016 } else if (pkt->pts != AV_NOPTS_VALUE) {
2017 decoded_frame->pts = pkt->pts;
2018 decoded_frame_tb = ist->st->time_base;
2020 decoded_frame->pts = ist->dts;
2021 decoded_frame_tb = AV_TIME_BASE_Q;
/* Consume the packet pts so it is not reused for a later frame. */
2023 pkt->pts = AV_NOPTS_VALUE;
2024 if (decoded_frame->pts != AV_NOPTS_VALUE)
2025 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2026 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2027 (AVRational){1, avctx->sample_rate})
2028 ist->nb_samples = decoded_frame->nb_samples;
/* Push into every filter; all but the last get a ref so the frame
 * survives, the last consumes decoded_frame itself. */
2029 for (i = 0; i < ist->nb_filters; i++) {
2030 if (i < ist->nb_filters - 1) {
2031 f = ist->filter_frame;
2032 err = av_frame_ref(f, decoded_frame);
2037 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2038 AV_BUFFERSRC_FLAG_PUSH);
2039 if (err == AVERROR_EOF)
2040 err = 0; /* ignore */
2044 decoded_frame->pts = AV_NOPTS_VALUE;
2046 av_frame_unref(ist->filter_frame);
2047 av_frame_unref(decoded_frame);
2048 return err < 0 ? err : ret;
/* Decode one video packet, fix up timestamps via best-effort estimation,
 * handle hwaccel frame retrieval and resolution/pixel-format changes, and
 * feed the decoded frame into every attached filter input.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
2051 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2053 AVFrame *decoded_frame, *f;
2054 int i, ret = 0, err = 0, resample_changed;
2055 int64_t best_effort_timestamp;
2056 AVRational *frame_sample_aspect;
2058 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2059 return AVERROR(ENOMEM);
2060 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2061 return AVERROR(ENOMEM);
/* Feed our tracked dts to the decoder via the packet. */
2063 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2062 decoded_frame = ist->decoded_frame;
2065 update_benchmark(NULL);
2066 ret = avcodec_decode_video2(ist->dec_ctx,
2067 decoded_frame, got_output, pkt);
2068 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2070 // The following line may be required in some cases where there is no parser
2071 // or the parser does not has_b_frames correctly
2072 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2073 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2074 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2076 av_log(ist->dec_ctx, AV_LOG_WARNING,
2077 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2078 "If you want to help, upload a sample "
2079 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2080 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2081 ist->dec_ctx->has_b_frames,
2082 ist->st->codec->has_b_frames);
2085 check_decode_result(ist, got_output, ret);
/* Sanity check: decoder context vs. actual decoded frame parameters. */
2087 if (*got_output && ret >= 0) {
2088 if (ist->dec_ctx->width != decoded_frame->width ||
2089 ist->dec_ctx->height != decoded_frame->height ||
2090 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2091 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2092 decoded_frame->width,
2093 decoded_frame->height,
2094 decoded_frame->format,
2095 ist->dec_ctx->width,
2096 ist->dec_ctx->height,
2097 ist->dec_ctx->pix_fmt);
2101 if (!*got_output || ret < 0)
/* Honour user override of field order (-top). */
2104 if(ist->top_field_first>=0)
2105 decoded_frame->top_field_first = ist->top_field_first;
2107 ist->frames_decoded++;
/* Copy hw-decoded frame data back to system memory when needed. */
2109 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2110 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2114 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2116 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2117 if(best_effort_timestamp != AV_NOPTS_VALUE)
2118 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2121 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2122 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2123 ist->st->index, av_ts2str(decoded_frame->pts),
2124 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2125 best_effort_timestamp,
2126 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2127 decoded_frame->key_frame, decoded_frame->pict_type,
2128 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides the codec-level value. */
2133 if (ist->st->sample_aspect_ratio.num)
2134 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect a size/pixfmt change and reconfigure affected filtergraphs. */
2136 resample_changed = ist->resample_width != decoded_frame->width ||
2137 ist->resample_height != decoded_frame->height ||
2138 ist->resample_pix_fmt != decoded_frame->format;
2139 if (resample_changed) {
2140 av_log(NULL, AV_LOG_INFO,
2141 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2142 ist->file_index, ist->st->index,
2143 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2144 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2146 ist->resample_width = decoded_frame->width;
2147 ist->resample_height = decoded_frame->height;
2148 ist->resample_pix_fmt = decoded_frame->format;
2150 for (i = 0; i < nb_filtergraphs; i++) {
2151 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2152 configure_filtergraph(filtergraphs[i]) < 0) {
2153 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2159 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Push into every filter; all but the last get a ref, the last consumes
 * decoded_frame itself. */
2160 for (i = 0; i < ist->nb_filters; i++) {
2161 if (!frame_sample_aspect->num)
2162 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2164 if (i < ist->nb_filters - 1) {
2165 f = ist->filter_frame;
2166 err = av_frame_ref(f, decoded_frame);
2171 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2172 if (ret == AVERROR_EOF) {
2173 ret = 0; /* ignore */
2174 } else if (ret < 0) {
2175 av_log(NULL, AV_LOG_FATAL,
2176 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2182 av_frame_unref(ist->filter_frame);
2183 av_frame_unref(decoded_frame);
2184 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * display duration (-fix_sub_duration), update sub2video state, and send
 * the subtitle to every matching subtitle encoder.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
2187 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2189 AVSubtitle subtitle;
2190 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2191 &subtitle, got_output, pkt);
2193 check_decode_result(NULL, got_output, ret);
2195 if (ret < 0 || !*got_output) {
2197 sub2video_flush(ist);
/* -fix_sub_duration: shorten the previous subtitle so it ends when this
 * one starts, then emit the *previous* one by swapping it with the
 * current decode result. */
2201 if (ist->fix_sub_duration) {
2203 if (ist->prev_sub.got_output) {
2204 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2205 1000, AV_TIME_BASE);
2206 if (end < ist->prev_sub.subtitle.end_display_time) {
2207 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2208 "Subtitle duration reduced from %d to %d%s\n",
2209 ist->prev_sub.subtitle.end_display_time, end,
2210 end <= 0 ? ", dropping it" : "");
2211 ist->prev_sub.subtitle.end_display_time = end;
2214 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2215 FFSWAP(int, ret, ist->prev_sub.ret);
2216 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2224 sub2video_update(ist, &subtitle);
2226 if (!subtitle.num_rects)
2229 ist->frames_decoded++;
/* Fan the subtitle out to every output stream fed by this input that is
 * actually encoding subtitles. */
2231 for (i = 0; i < nb_output_streams; i++) {
2232 OutputStream *ost = output_streams[i];
2234 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2235 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2238 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2242 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input attached to this input stream by
 * pushing a NULL frame into each buffer source. */
2246 static int send_filter_eof(InputStream *ist)
2249 for (i = 0; i < ist->nb_filters; i++) {
2250 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2257 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet dispatcher: maintains the input stream's dts/pts
 * bookkeeping, runs the appropriate decoder (possibly repeatedly for
 * multi-frame packets / EOF draining), sends filter EOFs after flushing,
 * and handles the stream-copy path for non-decoded streams.
 * NOTE(review): gappy excerpt; comments cover the visible lines only. */
2258 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* First packet: seed dts from the frame rate (accounting for B-frame
 * delay) so streams without timestamps start at a sane value. */
2264 if (!ist->saw_first_ts) {
2265 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2267 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2268 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2269 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2271 ist->saw_first_ts = 1;
2274 if (ist->next_dts == AV_NOPTS_VALUE)
2275 ist->next_dts = ist->dts;
2276 if (ist->next_pts == AV_NOPTS_VALUE)
2277 ist->next_pts = ist->pts;
2281 av_init_packet(&avpkt);
2289 if (pkt->dts != AV_NOPTS_VALUE) {
2290 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2291 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2292 ist->next_pts = ist->pts = ist->dts;
2295 // while we have more to decode or while the decoder did output something on EOF
2296 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2300 ist->pts = ist->next_pts;
2301 ist->dts = ist->next_dts;
/* A packet yielding less than its full size per decode call means
 * multiple frames per packet — warn unless the codec supports it. */
2303 if (avpkt.size && avpkt.size != pkt->size &&
2304 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2305 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2306 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2307 ist->showed_multi_packet_warning = 1;
2310 switch (ist->dec_ctx->codec_type) {
2311 case AVMEDIA_TYPE_AUDIO:
2312 ret = decode_audio (ist, &avpkt, &got_output);
2314 case AVMEDIA_TYPE_VIDEO:
2315 ret = decode_video (ist, &avpkt, &got_output);
/* Estimate frame duration: packet duration first, then the codec
 * frame rate (scaled by repeat_pict/ticks where a parser exists). */
2316 if (avpkt.duration) {
2317 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2318 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2319 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2320 duration = ((int64_t)AV_TIME_BASE *
2321 ist->dec_ctx->framerate.den * ticks) /
2322 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2326 if(ist->dts != AV_NOPTS_VALUE && duration) {
2327 ist->next_dts += duration;
2329 ist->next_dts = AV_NOPTS_VALUE;
2332 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2334 case AVMEDIA_TYPE_SUBTITLE:
2335 ret = transcode_subtitles(ist, &avpkt, &got_output);
2342 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2343 ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first decode call may consume the packet pts. */
2350 avpkt.pts= AV_NOPTS_VALUE;
2352 // touch data and size only if not EOF
2354 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2362 if (got_output && !pkt)
2366 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2367 /* except when looping we need to flush but not to send an EOF */
2368 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2369 int ret = send_filter_eof(ist);
2371 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2376 /* handle stream copy */
/* Stream copy: advance dts synthetically since no decoder ran. */
2377 if (!ist->decoding_needed) {
2378 ist->dts = ist->next_dts;
2379 switch (ist->dec_ctx->codec_type) {
2380 case AVMEDIA_TYPE_AUDIO:
2381 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2382 ist->dec_ctx->sample_rate;
2384 case AVMEDIA_TYPE_VIDEO:
2385 if (ist->framerate.num) {
2386 // TODO: Remove work-around for c99-to-c89 issue 7
2387 AVRational time_base_q = AV_TIME_BASE_Q;
2388 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2389 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2390 } else if (pkt->duration) {
2391 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2392 } else if(ist->dec_ctx->framerate.num != 0) {
2393 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2394 ist->next_dts += ((int64_t)AV_TIME_BASE *
2395 ist->dec_ctx->framerate.den * ticks) /
2396 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2400 ist->pts = ist->dts;
2401 ist->next_pts = ist->next_dts;
/* Fan the raw packet out to every stream-copy output fed by this input. */
2403 for (i = 0; pkt && i < nb_output_streams; i++) {
2404 OutputStream *ost = output_streams[i];
2406 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2409 do_streamcopy(ist, ost, pkt);
/* Generate an SDP description covering all RTP outputs and either print
 * it to stdout or write it to the file named by the global sdp_filename.
 * NOTE(review): this chunk is elided (non-contiguous original line
 * numbers); allocation checks and cleanup paths are not visible here. */
2415 static void print_sdp(void)
2420 AVIOContext *sdp_pb;
2421 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the outputs that use the "rtp" muxer; j counts them. */
2425 for (i = 0, j = 0; i < nb_output_files; i++) {
2426 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2427 avc[j] = output_files[i]->ctx;
/* Build the SDP text for the j collected RTP contexts. */
2435 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* Without -sdp_file, emit the SDP on stdout ... */
2437 if (!sdp_filename) {
2438 printf("SDP:\n%s\n", sdp);
/* ... otherwise write it to the requested file through avio. */
2441 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2442 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2444 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2445 avio_closep(&sdp_pb);
/* sdp_filename is owned globally; release it once consumed. */
2446 av_freep(&sdp_filename);
2454 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2457 for (i = 0; hwaccels[i].name; i++)
2458 if (hwaccels[i].pix_fmt == pix_fmt)
2459 return &hwaccels[i];
/* AVCodecContext.get_format callback: pick a pixel format from the
 * decoder's candidate list, preferring a usable hwaccel format.
 * NOTE(review): chunk is elided; the loop's continue/break statements
 * and the final software-format fallback are not visible here. */
2463 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2465 InputStream *ist = s->opaque;
2466 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2469 for (p = pix_fmts; *p != -1; p++) {
2470 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2471 const HWAccel *hwaccel;
/* Only hardware surface formats are candidates for hwaccel setup. */
2473 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2476 hwaccel = get_hwaccel(*p);
/* Reject formats whose hwaccel conflicts with an already-active one,
 * or with the hwaccel explicitly requested by the user. */
2478 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2479 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2482 ret = hwaccel->init(s);
/* Init failure is fatal only if this hwaccel was explicitly requested. */
2484 if (ist->hwaccel_id == hwaccel->id) {
2485 av_log(NULL, AV_LOG_FATAL,
2486 "%s hwaccel requested for input stream #%d:%d, "
2487 "but cannot be initialized.\n", hwaccel->name,
2488 ist->file_index, ist->st->index);
2489 return AV_PIX_FMT_NONE;
/* Success: record which hwaccel and pixel format are now active. */
2493 ist->active_hwaccel_id = hwaccel->id;
2494 ist->hwaccel_pix_fmt = *p;
2501 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2503 InputStream *ist = s->opaque;
2505 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2506 return ist->hwaccel_get_buffer(s, frame, flags);
2508 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed) and
 * reset its timestamp bookkeeping.  Writes a human-readable message into
 * error[] on failure.  NOTE(review): chunk is elided; the "decoder not
 * found" condition guarding the first snprintf is not visible here. */
2511 static int init_input_stream(int ist_index, char *error, int error_len)
2514 InputStream *ist = input_streams[ist_index];
2516 if (ist->decoding_needed) {
2517 AVCodec *codec = ist->dec;
2519 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2520 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2521 return AVERROR(EINVAL);
/* Wire up our callbacks so the decoder can reach this InputStream. */
2524 ist->dec_ctx->opaque = ist;
2525 ist->dec_ctx->get_format = get_format;
2526 ist->dec_ctx->get_buffer2 = get_buffer;
2527 ist->dec_ctx->thread_safe_callbacks = 1;
/* Decoded frames are reference-counted so filters can hold them. */
2529 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need compute_edt when they are muxed to an output;
 * mixing that with filtering is only partially supported. */
2530 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2531 (ist->decoding_needed & DECODING_FOR_OST)) {
2532 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2533 if (ist->decoding_needed & DECODING_FOR_FILTER)
2534 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread-count selection unless the user set one. */
2537 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2538 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2539 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2540 if (ret == AVERROR_EXPERIMENTAL)
2541 abort_codec_experimental(codec, 0);
2543 snprintf(error, error_len,
2544 "Error while opening decoder for input stream "
2546 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are reported as errors. */
2549 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2552 ist->next_pts = AV_NOPTS_VALUE;
2553 ist->next_dts = AV_NOPTS_VALUE;
2558 static InputStream *get_input_stream(OutputStream *ost)
2560 if (ost->source_index >= 0)
2561 return input_streams[ost->source_index];
/**
 * qsort() comparator for int64_t values.
 *
 * Returns -1, 0 or +1; uses the comparison-difference idiom (the
 * expansion of FFDIFFSIGN) instead of an overflow-prone subtraction.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
/* Open the encoder for one output stream and propagate the resulting
 * parameters (side data, time base) to the muxer's AVStream.
 * NOTE(review): chunk is elided; several error-check branches around
 * the snprintf/av_log calls are not visible here. */
2570 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2574 if (ost->encoding_needed) {
2575 AVCodec *codec = ost->enc;
2576 AVCodecContext *dec = NULL;
2579 if ((ist = get_input_stream(ost)))
/* Subtitle encoders need the decoder's ASS header copied over. */
2581 if (dec && dec->subtitle_header) {
2582 /* ASS code assumes this buffer is null terminated so add extra byte. */
2583 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2584 if (!ost->enc_ctx->subtitle_header)
2585 return AVERROR(ENOMEM);
2586 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2587 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic threading unless the user chose a count. */
2589 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2590 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio with no bitrate requested anywhere: default to 128 kb/s. */
2591 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2593 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2594 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2595 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2597 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2598 if (ret == AVERROR_EXPERIMENTAL)
2599 abort_codec_experimental(codec, 1);
2600 snprintf(error, error_len,
2601 "Error while opening encoder for output stream #%d:%d - "
2602 "maybe incorrect parameters such as bit_rate, rate, width or height",
2603 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders constrain the buffersink. */
2606 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2607 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2608 av_buffersink_set_frame_size(ost->filter->filter,
2609 ost->enc_ctx->frame_size);
2610 assert_avoptions(ost->encoder_opts);
/* Common user error: passing kbits/s where bits/s is expected. */
2611 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2612 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2613 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the encoder context into the (deprecated) AVStream codec. */
2615 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2617 av_log(NULL, AV_LOG_FATAL,
2618 "Error initializing the output stream codec context.\n");
/* Deep-copy any side data the encoder produced onto the stream. */
2622 if (ost->enc_ctx->nb_coded_side_data) {
2625 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2626 sizeof(*ost->st->side_data));
2627 if (!ost->st->side_data)
2628 return AVERROR(ENOMEM);
2630 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2631 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2632 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2634 sd_dst->data = av_malloc(sd_src->size);
2636 return AVERROR(ENOMEM);
2637 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2638 sd_dst->size = sd_src->size;
2639 sd_dst->type = sd_src->type;
2640 ost->st->nb_side_data++;
2644 // copy timebase while removing common factors
2645 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2646 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path (presumably the else of encoding_needed — the
 * branch keyword is elided): apply codec options, copy time base. */
2648 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2650 av_log(NULL, AV_LOG_FATAL,
2651 "Error setting up codec context options.\n");
2654 // copy timebase while removing common factors
2655 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse the -force_key_frames value (comma-separated times, with the
 * special "chapters[+offset]" form) into a sorted array of PTS values
 * stored on the OutputStream.  NOTE(review): chunk is elided; the
 * comma-counting body of the first loop and some loop plumbing are not
 * visible here. */
2661 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2662 AVCodecContext *avctx)
2665 int n = 1, i, size, index = 0;
/* First pass over kf: count entries (commas). */
2668 for (p = kf; *p; p++)
2672 pts = av_malloc_array(size, sizeof(*pts));
2674 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated token. */
2679 for (i = 0; i < n; i++) {
2680 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
2685 if (!memcmp(p, "chapters", 8)) {
2687 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow pts[] by the chapter count, guarding against int overflow. */
2690 if (avf->nb_chapters > INT_MAX - size ||
2691 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2693 av_log(NULL, AV_LOG_FATAL,
2694 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", rescaled to encoder time base. */
2697 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2698 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2700 for (j = 0; j < avf->nb_chapters; j++) {
2701 AVChapter *c = avf->chapters[j];
2702 av_assert1(index < size);
2703 pts[index++] = av_rescale_q(c->start, c->time_base,
2704 avctx->time_base) + t;
/* Plain timestamp entry. */
2709 t = parse_time_or_die("force_key_frames", p, 1);
2710 av_assert1(index < size);
2711 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Sort ascending so the encoder loop can consume them in order. */
2718 av_assert0(index == size);
2719 qsort(pts, size, sizeof(*pts), compare_int64);
2720 ost->forced_kf_count = size;
2721 ost->forced_kf_pts = pts;
/* Warn (once per stream) when a packet arrives for a stream that
 * appeared after the initial probe of the input file. */
2724 static void report_new_stream(int input_index, AVPacket *pkt)
2726 InputFile *file = input_files[input_index];
2727 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream index (or a higher one): be quiet. */
2729 if (pkt->stream_index < file->nb_streams_warn)
2731 av_log(file->ctx, AV_LOG_WARNING,
2732 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2733 av_get_media_type_string(st->codec->codec_type),
2734 input_index, pkt->stream_index,
2735 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index warned about so far. */
2736 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on an output stream to identify the
 * libavcodec version and encoder name, unless the user already set one
 * or bitexact output was requested. */
2739 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2741 AVDictionaryEntry *e;
2743 uint8_t *encoder_string;
2744 int encoder_string_len;
2745 int format_flags = 0;
2746 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
2748 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags to detect bitexact mode on the muxer... */
2751 e = av_dict_get(of->opts, "fflags", NULL, 0);
2753 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2756 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* ...and -flags on the encoder. */
2758 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2760 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2763 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* "<LIBAVCODEC_IDENT> <name>" plus space and NUL. */
2766 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2767 encoder_string = av_mallocz(encoder_string_len);
2768 if (!encoder_string)
/* Bitexact output uses the generic "Lavc" prefix with no version. */
2771 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2772 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2774 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2775 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* The dict takes ownership of encoder_string (DONT_STRDUP_VAL). */
2776 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2777 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the main transcode loop: bind filtergraph
 * outputs, compute encoder/stream-copy parameters for every output
 * stream, open decoders and encoders, write muxer headers and dump the
 * stream mapping.  NOTE(review): this chunk is heavily elided
 * (non-contiguous original line numbers); many braces, else-branches
 * and goto targets are not visible, so comments describe only the
 * visible statements. */
2780 static int transcode_init(void)
2782 int ret = 0, i, j, k;
2783 AVFormatContext *oc;
2786 char error[1024] = {0};
/* Bind unmapped complex-filtergraph outputs to the single input stream
 * of their graph, so downstream code has a valid source_index. */
2789 for (i = 0; i < nb_filtergraphs; i++) {
2790 FilterGraph *fg = filtergraphs[i];
2791 for (j = 0; j < fg->nb_outputs; j++) {
2792 OutputFilter *ofilter = fg->outputs[j];
2793 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2795 if (fg->nb_inputs != 1)
2797 for (k = nb_input_streams-1; k >= 0 ; k--)
2798 if (fg->inputs[0]->ist == input_streams[k])
2800 ofilter->ost->source_index = k;
2804 /* init framerate emulation */
2805 for (i = 0; i < nb_input_files; i++) {
2806 InputFile *ifile = input_files[i];
2807 if (ifile->rate_emu)
2808 for (j = 0; j < ifile->nb_streams; j++)
2809 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2812 /* for each output stream, we compute the right encoding parameters */
2813 for (i = 0; i < nb_output_streams; i++) {
2814 AVCodecContext *enc_ctx;
2815 AVCodecContext *dec_ctx = NULL;
2816 ost = output_streams[i];
2817 oc = output_files[ost->file_index]->ctx;
2818 ist = get_input_stream(ost);
2820 if (ost->attachment_filename)
/* Stream copy writes into the muxer's codec context directly. */
2823 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2826 dec_ctx = ist->dec_ctx;
2828 ost->st->disposition = ist->st->disposition;
2829 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2830 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output, mark it
 * as the default disposition (audio/video only). */
2832 for (j=0; j<oc->nb_streams; j++) {
2833 AVStream *st = oc->streams[j];
2834 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2837 if (j == oc->nb_streams)
2838 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2839 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---------- stream copy: mirror decoder parameters ---------- */
2842 if (ost->stream_copy) {
2844 uint64_t extra_size;
2846 av_assert0(ist && !ost->filter);
2848 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2850 if (extra_size > INT_MAX) {
2851 return AVERROR(EINVAL);
2854 /* if stream_copy is selected, no need to decode or encode */
2855 enc_ctx->codec_id = dec_ctx->codec_id;
2856 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container can carry
 * it (same codec id, or no tag table / unknown mapping). */
2858 if (!enc_ctx->codec_tag) {
2859 unsigned int codec_tag;
2860 if (!oc->oformat->codec_tag ||
2861 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2862 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2863 enc_ctx->codec_tag = dec_ctx->codec_tag;
2866 enc_ctx->bit_rate = dec_ctx->bit_rate;
2867 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2868 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2869 enc_ctx->field_order = dec_ctx->field_order;
/* Deep-copy extradata with the required zeroed padding. */
2870 if (dec_ctx->extradata_size) {
2871 enc_ctx->extradata = av_mallocz(extra_size);
2872 if (!enc_ctx->extradata) {
2873 return AVERROR(ENOMEM);
2875 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2877 enc_ctx->extradata_size= dec_ctx->extradata_size;
2878 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2880 enc_ctx->time_base = ist->st->time_base;
2882 * Avi is a special case here because it supports variable fps but
2883 * having the fps and timebase differe significantly adds quite some
2886 if(!strcmp(oc->oformat->name, "avi")) {
/* Heuristics to pick a sane time base when the user did not force
 * copy_tb; r_frame_rate doubled covers field-based content. */
2887 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2888 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2889 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2890 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2892 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2893 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2894 enc_ctx->ticks_per_frame = 2;
2895 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2896 && av_q2d(ist->st->time_base) < 1.0/500
2898 enc_ctx->time_base = dec_ctx->time_base;
2899 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2900 enc_ctx->time_base.den *= 2;
2901 enc_ctx->ticks_per_frame = 2;
/* Non-AVI, non-ISOBMFF-family containers with fixed fps. */
2903 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2904 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2905 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2906 && strcmp(oc->oformat->name, "f4v")
2908 if( copy_tb<0 && dec_ctx->time_base.den
2909 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2910 && av_q2d(ist->st->time_base) < 1.0/500
2912 enc_ctx->time_base = dec_ctx->time_base;
2913 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* tmcd (timecode) tracks keep the decoder time base when it looks
 * like a plausible frame duration (num < den, roughly <= 120 fps). */
2916 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2917 && dec_ctx->time_base.num < dec_ctx->time_base.den
2918 && dec_ctx->time_base.num > 0
2919 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2920 enc_ctx->time_base = dec_ctx->time_base;
2923 if (!ost->frame_rate.num)
2924 ost->frame_rate = ist->framerate;
2925 if(ost->frame_rate.num)
2926 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2928 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2929 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy input stream side data, skipping a display matrix that a
 * user rotation override has made stale. */
2931 if (ist->st->nb_side_data) {
2932 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2933 sizeof(*ist->st->side_data));
2934 if (!ost->st->side_data)
2935 return AVERROR(ENOMEM);
2937 ost->st->nb_side_data = 0;
2938 for (j = 0; j < ist->st->nb_side_data; j++) {
2939 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2940 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2942 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2945 sd_dst->data = av_malloc(sd_src->size);
2947 return AVERROR(ENOMEM);
2948 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2949 sd_dst->size = sd_src->size;
2950 sd_dst->type = sd_src->type;
2951 ost->st->nb_side_data++;
2955 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copying for stream copy. */
2957 switch (enc_ctx->codec_type) {
2958 case AVMEDIA_TYPE_AUDIO:
2959 if (audio_volume != 256) {
2960 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2963 enc_ctx->channel_layout = dec_ctx->channel_layout;
2964 enc_ctx->sample_rate = dec_ctx->sample_rate;
2965 enc_ctx->channels = dec_ctx->channels;
2966 enc_ctx->frame_size = dec_ctx->frame_size;
2967 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2968 enc_ctx->block_align = dec_ctx->block_align;
2969 enc_ctx->initial_padding = dec_ctx->delay;
2970 enc_ctx->profile = dec_ctx->profile;
2971 #if FF_API_AUDIOENC_DELAY
2972 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3 block_align is unreliable from demuxers; clear it. */
2974 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2975 enc_ctx->block_align= 0;
2976 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2977 enc_ctx->block_align= 0;
2979 case AVMEDIA_TYPE_VIDEO:
2980 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2981 enc_ctx->width = dec_ctx->width;
2982 enc_ctx->height = dec_ctx->height;
2983 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
/* SAR priority: -aspect override > input stream SAR > decoder SAR. */
2984 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2986 av_mul_q(ost->frame_aspect_ratio,
2987 (AVRational){ enc_ctx->height, enc_ctx->width });
2988 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2989 "with stream copy may produce invalid files\n");
2991 else if (ist->st->sample_aspect_ratio.num)
2992 sar = ist->st->sample_aspect_ratio;
2994 sar = dec_ctx->sample_aspect_ratio;
2995 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2996 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2997 ost->st->r_frame_rate = ist->st->r_frame_rate;
2999 case AVMEDIA_TYPE_SUBTITLE:
3000 enc_ctx->width = dec_ctx->width;
3001 enc_ctx->height = dec_ctx->height;
3003 case AVMEDIA_TYPE_UNKNOWN:
3004 case AVMEDIA_TYPE_DATA:
3005 case AVMEDIA_TYPE_ATTACHMENT:
/* ---------- encoding path ---------- */
3012 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3014 /* should only happen when a default codec is not present. */
3015 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3016 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3017 ret = AVERROR(EINVAL);
3021 set_encoder_id(output_files[ost->file_index], ost);
3024 if (qsv_transcode_init(ost))
/* Auto-create a simple filtergraph between decoder and encoder for
 * audio/video streams that have none yet. */
3029 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3030 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3032 fg = init_simple_filtergraph(ist, ost);
3033 if (configure_filtergraph(fg)) {
3034 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: user -r > filter graph > input
 * -framerate > input r_frame_rate > 25 fps fallback. */
3039 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3040 if (!ost->frame_rate.num)
3041 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3042 if (ist && !ost->frame_rate.num)
3043 ost->frame_rate = ist->framerate;
3044 if (ist && !ost->frame_rate.num)
3045 ost->frame_rate = ist->st->r_frame_rate;
3046 if (ist && !ost->frame_rate.num) {
3047 ost->frame_rate = (AVRational){25, 1};
3048 av_log(NULL, AV_LOG_WARNING,
3050 "about the input framerate is available. Falling "
3051 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3052 "if you want a different framerate.\n",
3053 ost->file_index, ost->index);
3055 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the encoder's supported rates unless -force_fps. */
3056 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3057 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3058 ost->frame_rate = ost->enc->supported_framerates[idx];
3060 // reduce frame rate for mpeg4 to be within the spec limits
3061 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3062 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3063 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull final encoder parameters from the filtergraph sink. */
3067 switch (enc_ctx->codec_type) {
3068 case AVMEDIA_TYPE_AUDIO:
3069 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3070 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3071 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3072 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3073 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3075 case AVMEDIA_TYPE_VIDEO:
3076 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3077 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3078 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3079 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3080 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3081 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3082 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale user-specified forced keyframe times to encoder ticks. */
3084 for (j = 0; j < ost->forced_kf_count; j++)
3085 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3087 enc_ctx->time_base);
3089 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3090 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3091 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3092 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3093 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3094 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn about non-yuv420p defaults for player compatibility. */
3095 if (!strncmp(ost->enc->name, "libx264", 7) &&
3096 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3097 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3098 av_log(NULL, AV_LOG_WARNING,
3099 "No pixel format specified, %s for H.264 encoding chosen.\n"
3100 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3101 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3102 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3103 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3104 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3105 av_log(NULL, AV_LOG_WARNING,
3106 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3107 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3108 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3109 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3111 ost->st->avg_frame_rate = ost->frame_rate;
/* Reset bits_per_raw_sample when the encode changes geometry/format. */
3114 enc_ctx->width != dec_ctx->width ||
3115 enc_ctx->height != dec_ctx->height ||
3116 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3117 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: expression form, "source" passthrough, or a
 * static comma-separated time list. */
3120 if (ost->forced_keyframes) {
3121 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3122 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3123 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3125 av_log(NULL, AV_LOG_ERROR,
3126 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3129 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3130 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3132 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3134 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3135 // parse it only for static kf timings
3136 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3137 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3141 case AVMEDIA_TYPE_SUBTITLE:
3142 enc_ctx->time_base = (AVRational){1, 1000};
3143 if (!enc_ctx->width) {
3144 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3145 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3148 case AVMEDIA_TYPE_DATA:
/* Evaluate the -disposition option string via a local AVOption table. */
3156 if (ost->disposition) {
3157 static const AVOption opts[] = {
3158 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3159 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3160 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3161 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3162 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3163 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3164 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3165 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3166 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3167 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3168 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3169 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3170 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3171 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3174 static const AVClass class = {
3176 .item_name = av_default_item_name,
3178 .version = LIBAVUTIL_VERSION_INT,
3180 const AVClass *pclass = &class;
3182 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3188 /* open each encoder */
3189 for (i = 0; i < nb_output_streams; i++) {
3190 ret = init_output_stream(output_streams[i], error, sizeof(error));
3195 /* init input streams */
3196 for (i = 0; i < nb_input_streams; i++)
3197 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close all encoders opened so far before bailing out. */
3198 for (i = 0; i < nb_output_streams; i++) {
3199 ost = output_streams[i];
3200 avcodec_close(ost->enc_ctx);
3205 /* discard unused programs */
3206 for (i = 0; i < nb_input_files; i++) {
3207 InputFile *ifile = input_files[i];
3208 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3209 AVProgram *p = ifile->ctx->programs[j];
3210 int discard = AVDISCARD_ALL;
/* Keep a program if any of its streams is still in use. */
3212 for (k = 0; k < p->nb_stream_indexes; k++)
3213 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3214 discard = AVDISCARD_DEFAULT;
3217 p->discard = discard;
3221 /* open files and write file headers */
3222 for (i = 0; i < nb_output_files; i++) {
3223 oc = output_files[i]->ctx;
3224 oc->interrupt_callback = int_cb;
3225 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3226 snprintf(error, sizeof(error),
3227 "Could not write header for output file #%d "
3228 "(incorrect codec parameters ?): %s",
3229 i, av_err2str(ret));
3230 ret = AVERROR(EINVAL);
3233 // assert_avoptions(output_files[i]->opts);
3234 if (strcmp(oc->oformat->name, "rtp")) {
3240 /* dump the file output parameters - cannot be done before in case
3242 for (i = 0; i < nb_output_files; i++) {
3243 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3246 /* dump the stream mapping */
3247 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3248 for (i = 0; i < nb_input_streams; i++) {
3249 ist = input_streams[i];
3251 for (j = 0; j < ist->nb_filters; j++) {
3252 if (ist->filters[j]->graph->graph_desc) {
3253 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3254 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3255 ist->filters[j]->name);
3256 if (nb_filtergraphs > 1)
3257 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3258 av_log(NULL, AV_LOG_INFO, "\n");
3263 for (i = 0; i < nb_output_streams; i++) {
3264 ost = output_streams[i];
3266 if (ost->attachment_filename) {
3267 /* an attached file */
3268 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3269 ost->attachment_filename, ost->file_index, ost->index);
3273 if (ost->filter && ost->filter->graph->graph_desc) {
3274 /* output from a complex graph */
3275 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3276 if (nb_filtergraphs > 1)
3277 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3279 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3280 ost->index, ost->enc ? ost->enc->name : "?");
3284 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3285 input_streams[ost->source_index]->file_index,
3286 input_streams[ost->source_index]->st->index,
3289 if (ost->sync_ist != input_streams[ost->source_index])
3290 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3291 ost->sync_ist->file_index,
3292 ost->sync_ist->st->index);
3293 if (ost->stream_copy)
3294 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Report codec/implementation names; "native" marks the case where
 * the codec name equals its descriptor name. */
3296 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3297 const AVCodec *out_codec = ost->enc;
3298 const char *decoder_name = "?";
3299 const char *in_codec_name = "?";
3300 const char *encoder_name = "?";
3301 const char *out_codec_name = "?";
3302 const AVCodecDescriptor *desc;
3305 decoder_name = in_codec->name;
3306 desc = avcodec_descriptor_get(in_codec->id);
3308 in_codec_name = desc->name;
3309 if (!strcmp(decoder_name, in_codec_name))
3310 decoder_name = "native";
3314 encoder_name = out_codec->name;
3315 desc = avcodec_descriptor_get(out_codec->id);
3317 out_codec_name = desc->name;
3318 if (!strcmp(encoder_name, out_codec_name))
3319 encoder_name = "native";
3322 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3323 in_codec_name, decoder_name,
3324 out_codec_name, encoder_name);
3326 av_log(NULL, AV_LOG_INFO, "\n");
3330 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3334 if (sdp_filename || want_sdp) {
3338 transcode_init_done = 1;
3343 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3344 static int need_output(void)
3348 for (i = 0; i < nb_output_streams; i++) {
3349 OutputStream *ost = output_streams[i];
3350 OutputFile *of = output_files[ost->file_index];
3351 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs limit. */
3353 if (ost->finished ||
3354 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3356 if (ost->frame_number >= ost->max_frames) {
3358 for (j = 0; j < of->ctx->nb_streams; j++)
3359 close_output_stream(output_streams[of->ost_index + j]);
3370 * Select the output stream to process.
3372 * @return selected output stream, or NULL if none available
3374 static OutputStream *choose_output(void)
3377 int64_t opts_min = INT64_MAX;
3378 OutputStream *ost_min = NULL;
/* Pick the unfinished stream with the smallest current DTS (in
 * AV_TIME_BASE units); streams with no DTS yet sort first. */
3380 for (i = 0; i < nb_output_streams; i++) {
3381 OutputStream *ost = output_streams[i];
3382 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3383 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3385 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3386 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* An "unavailable" winner yields NULL so the caller can wait. */
3388 if (!ost->finished && opts < opts_min) {
3390 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin; silently a no-op where
 * termios is unavailable or stdin is not a tty (tcgetattr fails). */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
/* Handle interactive single-key commands on stdin ('q', '+', '-', 's',
 * 'c'/'C', 'd'/'D', 'h', '?').  Returns AVERROR_EXIT to request program
 * termination, 0 otherwise.
 * NOTE(review): this extract is missing lines and has original file line
 * numbers fused into the text; the code is kept byte-identical below. */
3408 static int check_keyboard_interaction(int64_t cur_time)
3411 static int64_t last_time;
3412 if (received_nb_signals)
3413 return AVERROR_EXIT;
3414 /* read_key() returns 0 on EOF */
/* poll at most every 100ms, and never when running as a daemon */
3415 if(cur_time - last_time >= 100000 && !run_as_daemon){
3417 last_time = cur_time;
3421 return AVERROR_EXIT;
/* verbosity and QP-histogram toggles */
3422 if (key == '+') av_log_set_level(av_log_get_level()+10);
3423 if (key == '-') av_log_set_level(av_log_get_level()-10);
3424 if (key == 's') qp_hist ^= 1;
3427 do_hex_dump = do_pkt_dump = 0;
3428 } else if(do_pkt_dump){
3432 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a command line from the tty and send ('c') or queue ('C')
 * it to the matching filtergraph(s) */
3434 if (key == 'c' || key == 'C'){
3435 char buf[4096], target[64], command[256], arg[256] = {0};
3438 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3441 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3446 fprintf(stderr, "\n");
3448 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3449 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3450 target, time, command, arg);
3451 for (i = 0; i < nb_filtergraphs; i++) {
3452 FilterGraph *fg = filtergraphs[i];
3455 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3456 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3457 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3458 } else if (key == 'c') {
3459 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3460 ret = AVERROR_PATCHWELCOME;
3462 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3464 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3469 av_log(NULL, AV_LOG_ERROR,
3470 "Parse error, at least 3 arguments were expected, "
3471 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle (or read from the tty) the codec debug flags and apply
 * them to every input and output stream */
3474 if (key == 'd' || key == 'D'){
3477 debug = input_streams[0]->st->codec->debug<<1;
3478 if(!debug) debug = 1;
3479 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3486 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3491 fprintf(stderr, "\n");
3492 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3493 fprintf(stderr,"error parsing debug value\n");
3495 for(i=0;i<nb_input_streams;i++) {
3496 input_streams[i]->st->codec->debug = debug;
3498 for(i=0;i<nb_output_streams;i++) {
3499 OutputStream *ost = output_streams[i];
3500 ost->enc_ctx->debug = debug;
3502 if(debug) av_log_set_level(AV_LOG_DEBUG);
3503 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys): print the interactive help text */
3506 fprintf(stderr, "key    function\n"
3507 "?      show this help\n"
3508 "+      increase verbosity\n"
3509 "-      decrease verbosity\n"
3510 "c      Send command to first matching filter supporting it\n"
3511 "C      Send/Que command to all matching filters\n"
3512 "D      cycle through available debug modes\n"
3513 "h      dump packets/hex press to cycle through the 3 states\n"
3515 "s      Show QP histogram\n"
3522 static void *input_thread(void *arg)
3525 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3530 ret = av_read_frame(f->ctx, &pkt);
3532 if (ret == AVERROR(EAGAIN)) {
3537 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3540 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3541 if (flags && ret == AVERROR(EAGAIN)) {
3543 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3544 av_log(f->ctx, AV_LOG_WARNING,
3545 "Thread message queue blocking; consider raising the "
3546 "thread_queue_size option (current value: %d)\n",
3547 f->thread_queue_size);
3550 if (ret != AVERROR_EOF)
3551 av_log(f->ctx, AV_LOG_ERROR,
3552 "Unable to send packet to main thread: %s\n",
3554 av_packet_unref(&pkt);
3555 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3563 static void free_input_threads(void)
3567 for (i = 0; i < nb_input_files; i++) {
3568 InputFile *f = input_files[i];
3571 if (!f || !f->in_thread_queue)
3573 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3574 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3575 av_packet_unref(&pkt);
3577 pthread_join(f->thread, NULL);
3579 av_thread_message_queue_free(&f->in_thread_queue);
3583 static int init_input_threads(void)
3587 if (nb_input_files == 1)
3590 for (i = 0; i < nb_input_files; i++) {
3591 InputFile *f = input_files[i];
3593 if (f->ctx->pb ? !f->ctx->pb->seekable :
3594 strcmp(f->ctx->iformat->name, "lavfi"))
3595 f->non_blocking = 1;
3596 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3597 f->thread_queue_size, sizeof(AVPacket));
3601 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3602 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3603 av_thread_message_queue_free(&f->in_thread_queue);
3604 return AVERROR(ret);
3610 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3612 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3614 AV_THREAD_MESSAGE_NONBLOCK : 0);
3618 static int get_input_packet(InputFile *f, AVPacket *pkt)
3622 for (i = 0; i < f->nb_streams; i++) {
3623 InputStream *ist = input_streams[f->ist_index + i];
3624 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3625 int64_t now = av_gettime_relative() - ist->start;
3627 return AVERROR(EAGAIN);
3632 if (nb_input_files > 1)
3633 return get_input_packet_mt(f, pkt);
3635 return av_read_frame(f->ctx, pkt);
3638 static int got_eagain(void)
3641 for (i = 0; i < nb_output_streams; i++)
3642 if (output_streams[i]->unavailable)
3647 static void reset_eagain(void)
3650 for (i = 0; i < nb_input_files; i++)
3651 input_files[i]->eagain = 0;
3652 for (i = 0; i < nb_output_streams; i++)
3653 output_streams[i]->unavailable = 0;
3656 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3657 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3658 AVRational time_base)
3664 return tmp_time_base;
3667 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3670 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to the start, flush all
 * decoders, and accumulate the file's duration (used to offset timestamps of
 * the next iteration).  Audio duration is preferred over video because a last
 * video frame's length is not well defined.
 * NOTE(review): this extract is missing lines and has original file line
 * numbers fused into the text; the code is kept byte-identical below. */
3676 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3679 AVCodecContext *avctx;
3680 int i, ret, has_audio = 0;
3681 int64_t duration = 0;
3683 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush decoders and detect whether usable audio is present */
3687 for (i = 0; i < ifile->nb_streams; i++) {
3688 ist = input_streams[ifile->ist_index + i];
3689 avctx = ist->dec_ctx;
3692 if (ist->decoding_needed) {
3693 process_input_packet(ist, NULL, 1);
3694 avcodec_flush_buffers(avctx);
3697 /* duration is the length of the last frame in a stream
3698 * when audio stream is present we don't care about
3699 * last video frame length because it's not defined exactly */
3700 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute the last-frame duration per stream */
3704 for (i = 0; i < ifile->nb_streams; i++) {
3705 ist = input_streams[ifile->ist_index + i];
3706 avctx = ist->dec_ctx;
3709 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3710 AVRational sample_rate = {1, avctx->sample_rate};
3712 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3716 if (ist->framerate.num) {
3717 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3718 } else if (ist->st->avg_frame_rate.num) {
3719 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3720 } else duration = 1;
3722 if (!ifile->duration)
3723 ifile->time_base = ist->st->time_base;
3724 /* the total duration of the stream, max_pts - min_pts is
3725 * the duration of the stream without the last frame */
3726 duration += ist->max_pts - ist->min_pts;
3727 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* a positive loop count is decremented once per completed iteration */
3731 if (ifile->loop > 0)
/* Read one packet from the given input file and feed it to the decoder or
 * streamcopy path, fixing up timestamps on the way (wrap correction,
 * discontinuity handling, ts_offset, -stream_loop duration offset).
 * NOTE(review): this extract is missing lines and has original file line
 * numbers fused into the text; the code is kept byte-identical below.
 * Return values (from the original documentation block): */
3739 * - 0 -- one packet was read and processed
3740 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3741 * this function should be called again
3742 * - AVERROR_EOF -- this function should not be called again
3744 static int process_input(int file_index)
3746 InputFile *ifile = input_files[file_index];
3747 AVFormatContext *is;
3755 ret = get_input_packet(ifile, &pkt);
3757 if (ret == AVERROR(EAGAIN)) {
/* on EOF with -stream_loop active, rewind and read again */
3761 if (ret < 0 && ifile->loop) {
3762 if ((ret = seek_to_start(ifile, is)) < 0)
3764 ret = get_input_packet(ifile, &pkt);
3767 if (ret != AVERROR_EOF) {
3768 print_error(is->filename, ret);
/* real EOF: flush decoders and finish outputs fed directly by this file */
3773 for (i = 0; i < ifile->nb_streams; i++) {
3774 ist = input_streams[ifile->ist_index + i];
3775 if (ist->decoding_needed) {
3776 ret = process_input_packet(ist, NULL, 0);
3781 /* mark all outputs that don't go through lavfi as finished */
3782 for (j = 0; j < nb_output_streams; j++) {
3783 OutputStream *ost = output_streams[j];
3785 if (ost->source_index == ifile->ist_index + i &&
3786 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3787 finish_output_stream(ost);
3791 ifile->eof_reached = 1;
3792 return AVERROR(EAGAIN);
3798 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3799 is->streams[pkt.stream_index]);
3801 /* the following test is needed in case new streams appear
3802 dynamically in stream : we ignore them */
3803 if (pkt.stream_index >= ifile->nb_streams) {
3804 report_new_stream(file_index, &pkt);
3805 goto discard_packet;
3808 ist = input_streams[ifile->ist_index + pkt.stream_index];
3810 ist->data_size += pkt.size;
3814 goto discard_packet;
3816 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3817 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3822 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3823 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3824 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3825 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3826 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3827 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3828 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3829 av_ts2str(input_files[ist->file_index]->ts_offset),
3830 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
3833 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3834 int64_t stime, stime2;
3835 // Correcting starttime based on the enabled streams
3836 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3837 //       so we instead do it here as part of discontinuity handling
3838 if (   ist->next_dts == AV_NOPTS_VALUE
3839 && ifile->ts_offset == -is->start_time
3840 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3841 int64_t new_start_time = INT64_MAX;
3842 for (i=0; i<is->nb_streams; i++) {
3843 AVStream *st = is->streams[i];
3844 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3846 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3848 if (new_start_time > is->start_time) {
3849 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3850 ifile->ts_offset = -new_start_time;
3854 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3855 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3856 ist->wrap_correction_done = 1;
3858 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3859 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3860 ist->wrap_correction_done = 0;
3862 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3863 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3864 ist->wrap_correction_done = 0;
3868 /* add the stream-global side data to the first packet */
3869 if (ist->nb_packets == 1) {
3870 if (ist->st->nb_side_data)
3871 av_packet_split_side_data(&pkt);
3872 for (i = 0; i < ist->st->nb_side_data; i++) {
3873 AVPacketSideData *src_sd = &ist->st->side_data[i];
3876 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3878 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3881 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3885 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file ts_offset and per-stream ts_scale */
3889 if (pkt.dts != AV_NOPTS_VALUE)
3890 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3891 if (pkt.pts != AV_NOPTS_VALUE)
3892 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3894 if (pkt.pts != AV_NOPTS_VALUE)
3895 pkt.pts *= ist->ts_scale;
3896 if (pkt.dts != AV_NOPTS_VALUE)
3897 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity detection (only before the first decoded dts) */
3899 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3900 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3901 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3902 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3903 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3904 int64_t delta = pkt_dts - ifile->last_ts;
3905 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3906 delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
3907 ifile->ts_offset -= delta;
3908 av_log(NULL, AV_LOG_DEBUG,
3909 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3910 delta, ifile->ts_offset);
3911 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3912 if (pkt.pts != AV_NOPTS_VALUE)
3913 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration */
3917 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3918 if (pkt.pts != AV_NOPTS_VALUE) {
3919 pkt.pts += duration;
3920 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3921 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3924 if (pkt.dts != AV_NOPTS_VALUE)
3925 pkt.dts += duration;
/* intra-stream discontinuity / invalid-timestamp handling */
3927 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3928 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3929 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3930 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3932 int64_t delta = pkt_dts - ist->next_dts;
3933 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3934 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3935 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
3936 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3937 ifile->ts_offset -= delta;
3938 av_log(NULL, AV_LOG_DEBUG,
3939 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3940 delta, ifile->ts_offset);
3941 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3942 if (pkt.pts != AV_NOPTS_VALUE)
3943 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3946 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3947 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3948 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3949 pkt.dts = AV_NOPTS_VALUE;
3951 if (pkt.pts != AV_NOPTS_VALUE){
3952 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3953 delta = pkt_pts - ist->next_dts;
3954 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3955 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3956 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3957 pkt.pts = AV_NOPTS_VALUE;
3963 if (pkt.dts != AV_NOPTS_VALUE)
3964 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3967 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3968 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3969 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3970 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3971 av_ts2str(input_files[ist->file_index]->ts_offset),
3972 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3975 sub2video_heartbeat(ist, pkt.pts);
3977 process_input_packet(ist, &pkt, 0);
3980 av_packet_unref(&pkt);
3986 * Perform a step of transcoding for the specified filter graph.
3988 * @param[in] graph filter graph to consider
3989 * @param[out] best_ist input stream where a frame would allow to continue
3990 * @return 0 for success, <0 for error
3992 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3995 int nb_requests, nb_requests_max = 0;
3996 InputFilter *ifilter;
4000 ret = avfilter_graph_request_oldest(graph->graph);
4002 return reap_filters(0);
4004 if (ret == AVERROR_EOF) {
4005 ret = reap_filters(1);
4006 for (i = 0; i < graph->nb_outputs; i++)
4007 close_output_stream(graph->outputs[i]->ost);
4010 if (ret != AVERROR(EAGAIN))
4013 for (i = 0; i < graph->nb_inputs; i++) {
4014 ifilter = graph->inputs[i];
4016 if (input_files[ist->file_index]->eagain ||
4017 input_files[ist->file_index]->eof_reached)
4019 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4020 if (nb_requests > nb_requests_max) {
4021 nb_requests_max = nb_requests;
4027 for (i = 0; i < graph->nb_outputs; i++)
4028 graph->outputs[i]->ost->unavailable = 1;
4034 * Run a single step of transcoding.
4036 * @return 0 for success, <0 for error
4038 static int transcode_step(void)
4044 ost = choose_output();
4051 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4056 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4061 av_assert0(ost->source_index >= 0);
4062 ist = input_streams[ost->source_index];
4065 ret = process_input(ist->file_index);
4066 if (ret == AVERROR(EAGAIN)) {
4067 if (input_files[ist->file_index]->eagain)
4068 ost->unavailable = 1;
4073 return ret == AVERROR_EOF ? 0 : ret;
4075 return reap_filters(0);
/* Main conversion loop: initialize, iterate transcode_step() until no output
 * is needed or an error/signal occurs, then flush decoders, write trailers,
 * close encoders/decoders and free per-stream resources.
 * NOTE(review): this extract is missing lines and has original file line
 * numbers fused into the text; the code is kept byte-identical below. */
4079 * The following code is the main loop of the file converter
4081 static int transcode(void)
4084 AVFormatContext *os;
4087 int64_t timer_start;
4088 int64_t total_packets_written = 0;
4090 ret = transcode_init();
4094 if (stdin_interaction) {
4095 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4098 timer_start = av_gettime_relative();
4101 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until done or interrupted */
4105 while (!received_sigterm) {
4106 int64_t cur_time= av_gettime_relative();
4108 /* if 'q' pressed, exits */
4109 if (stdin_interaction)
4110 if (check_keyboard_interaction(cur_time) < 0)
4113 /* check if there's any stream where output is still needed */
4114 if (!need_output()) {
4115 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4119 ret = transcode_step();
4121 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4125 av_strerror(ret, errbuf, sizeof(errbuf));
4127 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4132 /* dump report by using the output first video and audio streams */
4133 print_report(0, timer_start, cur_time);
4136 free_input_threads();
4139 /* at the end of stream, we must flush the decoder buffers */
4140 for (i = 0; i < nb_input_streams; i++) {
4141 ist = input_streams[i];
4142 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4143 process_input_packet(ist, NULL, 0);
4150 /* write the trailer if needed and close file */
4151 for (i = 0; i < nb_output_files; i++) {
4152 os = output_files[i]->ctx;
4153 if ((ret = av_write_trailer(os)) < 0) {
4154 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4160 /* dump report by using the first video and audio streams */
4161 print_report(1, timer_start, av_gettime_relative());
4163 /* close each encoder */
4164 for (i = 0; i < nb_output_streams; i++) {
4165 ost = output_streams[i];
4166 if (ost->encoding_needed) {
4167 av_freep(&ost->enc_ctx->stats_in);
4169 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was written at all */
4172 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4173 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4177 /* close each decoder */
4178 for (i = 0; i < nb_input_streams; i++) {
4179 ist = input_streams[i];
4180 if (ist->decoding_needed) {
4181 avcodec_close(ist->dec_ctx);
4182 if (ist->hwaccel_uninit)
4183 ist->hwaccel_uninit(ist->dec_ctx);
4192 free_input_threads();
/* release per-output-stream buffers, logs and option dictionaries */
4195 if (output_streams) {
4196 for (i = 0; i < nb_output_streams; i++) {
4197 ost = output_streams[i];
4200 fclose(ost->logfile);
4201 ost->logfile = NULL;
4203 av_freep(&ost->forced_kf_pts);
4204 av_freep(&ost->apad);
4205 av_freep(&ost->disposition);
4206 av_dict_free(&ost->encoder_opts);
4207 av_dict_free(&ost->sws_dict);
4208 av_dict_free(&ost->swr_opts);
4209 av_dict_free(&ost->resample_opts);
/* Return the user CPU time consumed by this process, in microseconds.
 * Falls back to wall-clock time where neither getrusage() nor
 * GetProcessTimes() is available. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME is in 100ns units; divide by 10 for microseconds */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/* Return the peak memory usage of this process in bytes, or 0 where no
 * platform API is available.  ru_maxrss is reported in kilobytes, hence
 * the * 1024. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/* Log callback that discards all messages; installed by main() when the
 * program is started with -d (daemon mode). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
/* Program entry point: register libraries, parse options, run transcode()
 * and report benchmark/decode-error statistics before exiting.
 * NOTE(review): this extract is missing lines (including the closing brace)
 * and has original file line numbers fused into the text; the code is kept
 * byte-identical below. */
4257 int main(int argc, char **argv)
4262 register_exit(ffmpeg_cleanup);
4264 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4266 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4267 parse_loglevel(argc, argv, options);
/* -d as first argument: run as daemon, silence all logging */
4269 if(argc>1 && !strcmp(argv[1], "-d")){
4271 av_log_set_callback(log_callback_null);
/* register all codecs, devices, filters and network protocols */
4276 avcodec_register_all();
4278 avdevice_register_all();
4280 avfilter_register_all();
4282 avformat_network_init();
4284 show_banner(argc, argv, options);
4288 /* parse options and open all input/output files */
4289 ret = ffmpeg_parse_options(argc, argv);
4293 if (nb_output_files <= 0 && nb_input_files == 0) {
4295 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4299 /* file converter / grab */
4300 if (nb_output_files <= 0) {
4301 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4305 //     if (nb_input_files == 0) {
4306 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4310 current_time = ti = getutime();
4311 if (transcode() < 0)
4313 ti = getutime() - ti;
4315 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4317 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4318 decode_error_stat[0], decode_error_stat[1]);
/* fail if the decode error rate exceeds -max_error_rate */
4319 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4322 exit_program(received_nb_signals ? 255 : main_return_code);
4323 return main_return_code;