2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); lazily opened. */
112 static FILE *vstats_file;
/* Constant names available in -force_key_frames expressions (table body elided here). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Global transcode counters/state shared across the muxing/encoding paths. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
/* Wall-clock reference used by update_benchmark() (see getutime usage below). */
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated in do_subtitle_out. */
135 static uint8_t *subtitle_out;
/* Parallel arrays of input/output streams and files populated by the option parser. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal attributes so term_exit_sigsafe() can restore them on exit. */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a freshly allocated, zeroed RGB32 canvas
 * sized from the decoder (falling back to the configured sub2video size).
 * NOTE(review): interior lines of this function are elided in this view;
 * return-value handling is presumed to follow the usual 0/negative-AVERROR
 * convention — confirm against the full source. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
/* Prefer decoder dimensions when known; otherwise use the preconfigured w/h. */
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the canvas to fully transparent black (RGB32 is 4 bytes/pixel). */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 sub2video
 * canvas, expanding 8-bit palette indices through the rect's palette.
 * Rectangles that are not bitmaps or overflow the canvas are warned about
 * and skipped (the early-return lines are elided in this view). */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Position dst at the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
/* Row-by-row palette lookup: each source byte indexes a 32-bit color. */
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Push the current sub2video canvas (stamped with pts) into every filter
 * this input stream feeds. KEEP_REF lets the same frame be sent repeatedly
 * (the heartbeat mechanism re-sends it); PUSH requests immediate filtering. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a blank canvas and push it downstream.
 * Called with sub == NULL to emit a blank (clearing) frame — in that case
 * pts falls back to the previously recorded end_pts (the NULL branch is
 * partially elided in this view). */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* Subtitle display times are in ms relative to sub->pts (AV_TIME_BASE units);
 * rescale both edges into the input stream's time base. */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* Composite every rectangle onto the canvas, then hand it to the filters. */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams "alive": whenever a packet is read from a file,
 * re-send the last subtitle canvas on every sub2video stream of that file
 * so filters (e.g. overlay) waiting for a subtitle frame do not stall
 * video frames indefinitely. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that have a sub2video canvas participate. */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Expired or never-rendered canvas: emit a blank/clearing frame first. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* NOTE(review): nb_reqs gates the push below — the conditional line using
 * it is elided in this view; presumably the frame is only re-pushed when
 * some filter actually failed a request. Confirm against full source. */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit a final clearing
 * frame if a subtitle is still displayed, then signal EOF to each buffer
 * source by pushing a NULL frame. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved in
 * oldtty (guard condition elided in this view). Safe to call from a signal
 * handler; the av_log(QUIET) call flushes/neutralizes logging output. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-handler/main-loop shared state. NOTE(review): these are plain
 * `volatile int`, not C11 atomics — adequate for this legacy codebase's
 * flag-polling pattern but not formally race-free. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* POSIX signal handler: record the signal and count deliveries; after more
 * than 3 signals, write() a message directly (async-signal-safe) and hard
 * exit (the exit call itself is elided in this view). */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX. For close/logoff/shutdown, Windows
 * hard-kills the process when this callback returns, so we spin until the
 * main thread has finished cleanup (ffmpeg_exited set). */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT (CTRL_C_EVENT case elided here). */
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior of term_init() — its opening lines are elided in
 * this view. Puts the controlling tty into raw-ish mode (no echo, no
 * canonical input) so single keypresses can be read, then installs signal
 * handlers for graceful shutdown. */
371 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control; keep output post-processing. */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU-time rlimit exceeded — treat like a termination request. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Returns the pressed key, or a sentinel when no input is pending (return
 * paths elided in this view). POSIX path polls stdin via select(); the
 * Windows path distinguishes console input from a pipe. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on non-console handles => stdin is a pipe. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once more signals have been
 * received than transcode_init_done allows (i.e. one signal aborts during
 * init, a second aborts after init completed). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free filter graphs, output files/streams, input
 * files/streams and associated buffers, then report exit status. Several
 * guard/loop lines are elided in this view. `ret` is the pending exit code
 * (used only for the final log messages here). */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filter graphs: free the graph plus per-input/per-output names. --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (unless AVFMT_NOFILE) and free ctx. --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, cached frames, parser,
 *     keyframe-forcing state, channel map, encoder context. --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 avcodec_free_context(&ost->enc_ctx);
532 av_freep(&output_streams[i]);
/* Input reader threads must be stopped before closing their files. */
535 free_input_threads();
537 for (i = 0; i < nb_input_files; i++) {
538 avformat_close_input(&input_files[i]->ctx);
539 av_freep(&input_files[i]);
541 for (i = 0; i < nb_input_streams; i++) {
542 InputStream *ist = input_streams[i];
544 av_frame_free(&ist->decoded_frame);
545 av_frame_free(&ist->filter_frame);
546 av_dict_free(&ist->decoder_opts);
547 avsubtitle_free(&ist->prev_sub.subtitle);
548 av_frame_free(&ist->sub2video.frame);
549 av_freep(&ist->filters);
550 av_freep(&ist->hwaccel_device);
552 avcodec_free_context(&ist->dec_ctx);
554 av_freep(&input_streams[i]);
559 av_freep(&vstats_filename);
561 av_freep(&input_streams);
562 av_freep(&input_files);
563 av_freep(&output_streams);
564 av_freep(&output_files);
568 avformat_network_deinit();
/* Final status: signal-triggered exit is "normal"; a nonzero ret after a
 * completed init means the conversion itself failed. */
570 if (received_sigterm) {
571 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
572 (int) received_sigterm);
573 } else if (ret && transcode_init_done) {
574 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-
 * sensitive match). Used to strip already-consumed options before the
 * leftover check in assert_avoptions(). */
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail hard if any option in m was not consumed by the (de)muxer/codec —
 * a leftover entry means the user passed an unknown option. The fatal
 * exit call is elided in this view. */
589 void assert_avoptions(AVDictionary *m)
591 AVDictionaryEntry *t;
592 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
593 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort with a diagnostic when an experimental codec is selected without
 * -strict experimental (body elided in this view). */
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is enabled, log the CPU time elapsed since the last
 * call, labeled by the printf-style fmt (fmt == NULL just resets the
 * timer; that branch is elided in this view). */
603 static void update_benchmark(const char *fmt, ...)
605 if (do_benchmark_all) {
606 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
614 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags go to ost itself,
 * `others` to all remaining streams (used after a fatal mux error). */
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: copy extradata to the muxer codec
 * context if needed, enforce -frames limits, extract quality side data,
 * apply bitstream filters, sanity-fix timestamps (DTS<=PTS, monotonic
 * DTS), then interleave-write. Several guard lines are elided here. */
629 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
631 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Stream-copy uses the muxer-side st->codec; transcode uses enc_ctx. */
632 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate encoder extradata to the muxer context exactly once. */
635 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
636 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
637 if (ost->st->codec->extradata) {
638 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
639 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* vsync drop / async<0: discard timestamps and let the muxer regenerate. */
643 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648 * Audio encoders may split the packets -- #frames in != #packets out.
649 * But there is no reordering, so we can limit the number of output packets
650 * by simply dropping them here.
651 * Counting encoded video frames needs to be done separately because of
652 * reordering, see do_video_out()
654 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
655 if (ost->frame_number >= ost->max_frames) {
656 av_packet_unref(pkt);
/* Pull encoder quality/PSNR stats out of packet side data for reporting. */
661 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
663 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665 ost->quality = sd ? AV_RL32(sd) : -1;
666 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration must match the frame rate exactly. */
675 if (ost->frame_rate.num && ost->is_cfr) {
676 if (pkt->duration > 0)
677 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
678 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
684 av_packet_split_side_data(pkt);
686 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
687 print_error("", ret);
/* Timestamp sanitation (skipped for formats without timestamps). */
692 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
693 if (pkt->dts != AV_NOPTS_VALUE &&
694 pkt->pts != AV_NOPTS_VALUE &&
695 pkt->dts > pkt->pts) {
696 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
698 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1} (sum minus
 * min minus max), a guess that keeps dts <= pts and monotonic. */
700 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
701 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
702 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
705 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
706 pkt->dts != AV_NOPTS_VALUE &&
707 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing dts; non-strict allow equal. */
708 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
709 if (pkt->dts < max) {
710 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
711 av_log(s, loglevel, "Non-monotonous DTS in output stream "
712 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
713 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
715 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
718 av_log(s, loglevel, "changing to %"PRId64". This may result "
719 "in incorrect timestamps in the output file.\n",
721 if(pkt->pts >= pkt->dts)
722 pkt->pts = FFMAX(pkt->pts, max);
727 ost->last_mux_dts = pkt->dts;
729 ost->data_size += pkt->size;
730 ost->packets_written++;
732 pkt->stream_index = ost->index;
/* -debug_ts trace of what goes into the muxer. */
735 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
736 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
737 av_get_media_type_string(ost->enc_ctx->codec_type),
738 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
739 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
744 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure, flag every stream finished so the transcode loop stops. */
746 print_error("av_interleaved_write_frame()", ret);
747 main_return_code = 1;
748 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
750 av_packet_unref(pkt);
/* Mark one output stream's encoder finished and, when applicable, shrink
 * the file's recording time to the stream's current end position (guard
 * around the rescale is elided in this view). */
753 static void close_output_stream(OutputStream *ost)
755 OutputFile *of = output_files[ost->file_index];
757 ost->finished |= ENCODER_FINISHED;
759 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
760 of->recording_time = FFMIN(of->recording_time, end);
/* Returns whether the stream is still within the -t recording window;
 * closes the stream and (per the elided return) reports 0 once the limit
 * is reached. Comparison is done in the encoder time base vs AV_TIME_BASE. */
764 static int check_recording_time(OutputStream *ost)
766 OutputFile *of = output_files[ost->file_index];
768 if (of->recording_time != INT64_MAX &&
769 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
770 AV_TIME_BASE_Q) >= 0) {
771 close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet(s): stamp pts from
 * sync_opts when missing, run avcodec_encode_audio2(), rescale packet
 * timestamps to the stream time base, and hand off to write_frame().
 * Several lines (got_packet check, recording-time guard result) are
 * elided in this view. */
777 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
780 AVCodecContext *enc = ost->enc_ctx;
784 av_init_packet(&pkt);
788 if (!check_recording_time(ost))
/* Fall back to the running sample counter when the frame has no pts. */
791 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
792 frame->pts = ost->sync_opts;
793 ost->sync_opts = frame->pts + frame->nb_samples;
794 ost->samples_encoded += frame->nb_samples;
795 ost->frames_encoded++;
797 av_assert0(pkt.size || !pkt.data);
798 update_benchmark(NULL);
800 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
801 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
802 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
803 enc->time_base.num, enc->time_base.den);
806 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
807 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
810 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder produced timestamps in enc->time_base; muxer wants st->time_base. */
813 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
816 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
817 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
818 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
819 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
822 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux it. DVB subtitles are encoded twice (one
 * packet draws, a second clears), hence the nb-iteration loop. Display
 * times are normalized so start_display_time is 0 before encoding.
 * Several declarations/guards are elided in this view. */
826 static void do_subtitle_out(AVFormatContext *s,
831 int subtitle_out_max_size = 1024 * 1024;
832 int subtitle_out_size, nb, i;
/* Subtitles without a pts cannot be placed on the timeline. */
837 if (sub->pts == AV_NOPTS_VALUE) {
838 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in ffmpeg_cleanup). */
847 subtitle_out = av_malloc(subtitle_out_max_size);
849 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
854 /* Note: DVB subtitle need one packet to draw them and one other
855 packet to clear them */
856 /* XXX: signal it in the codec context ? */
857 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
862 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
864 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
865 pts -= output_files[ost->file_index]->start_time;
866 for (i = 0; i < nb; i++) {
/* The encoder may mutate num_rects (e.g. for the clearing packet); save
 * and restore it around the call. */
867 unsigned save_num_rects = sub->num_rects;
869 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
870 if (!check_recording_time(ost))
874 // start_display_time is required to be 0
875 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
876 sub->end_display_time -= sub->start_display_time;
877 sub->start_display_time = 0;
881 ost->frames_encoded++;
883 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
884 subtitle_out_max_size, sub);
886 sub->num_rects = save_num_rects;
887 if (subtitle_out_size < 0) {
888 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
892 av_init_packet(&pkt);
893 pkt.data = subtitle_out;
894 pkt.size = subtitle_out_size;
895 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
896 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
897 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
898 /* XXX: the pts correction is handled here. Maybe handling
899 it in the codec would be better */
/* 90 = 90kHz ticks per millisecond; draw packet uses start time, clear
 * packet (second iteration) uses end time. */
901 pkt.pts += 90 * sub->start_display_time;
903 pkt.pts += 90 * sub->end_display_time;
906 write_frame(s, &pkt, ost);
/* Encode (possibly duplicating or dropping) one video frame according to
 * the selected vsync method, handle forced keyframes, and mux the result.
 * next_picture == NULL flushes. Large parts of this function are elided
 * in this view (sync_ipts computation, several case labels, flush path). */
910 static void do_video_out(AVFormatContext *s,
912 AVFrame *next_picture,
915 int ret, format_video_sync;
917 AVCodecContext *enc = ost->enc_ctx;
918 AVCodecContext *mux_enc = ost->st->codec;
919 int nb_frames, nb0_frames, i;
920 double delta, delta0;
923 InputStream *ist = NULL;
924 AVFilterContext *filter = ost->filter->filter;
926 if (ost->source_index >= 0)
927 ist = input_streams[ost->source_index];
/* Nominal frame duration in encoder time-base units, derived from the
 * filter output frame rate when known. */
929 if (filter->inputs[0]->frame_rate.num > 0 &&
930 filter->inputs[0]->frame_rate.den > 0)
931 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
933 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
934 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the frame's own packet duration when no filter script changes timing. */
936 if (!ost->filters_script &&
940 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
941 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path: predict dup count from the median of recent nb0 values. */
946 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
947 ost->last_nb0_frames[1],
948 ost->last_nb0_frames[2]);
950 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
951 delta = delta0 + duration;
953 /* by default, we output a single frame */
954 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete method based on the output format. */
957 format_video_sync = video_sync_method;
958 if (format_video_sync == VSYNC_AUTO) {
959 if(!strcmp(s->oformat->name, "avi")) {
960 format_video_sync = VSYNC_VFR;
962 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
964 && format_video_sync == VSYNC_CFR
965 && input_files[ist->file_index]->ctx->nb_streams == 1
966 && input_files[ist->file_index]->input_ts_offset == 0) {
967 format_video_sync = VSYNC_VSCFR;
969 if (format_video_sync == VSYNC_CFR && copy_ts) {
970 format_video_sync = VSYNC_VSCFR;
973 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift: clip the frame to the current output position. */
977 format_video_sync != VSYNC_PASSTHROUGH &&
978 format_video_sync != VSYNC_DROP) {
980 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
982 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
983 sync_ipts = ost->sync_opts;
/* Per-method dup/drop decision (case labels partially elided here). */
988 switch (format_video_sync) {
990 if (ost->frame_number == 0 && delta0 >= 0.5) {
991 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
994 ost->sync_opts = lrint(sync_ipts);
997 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
998 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1000 } else if (delta < -1.1)
1002 else if (delta > 1.1) {
1003 nb_frames = lrintf(delta);
1005 nb0_frames = lrintf(delta0 - 0.6);
1011 else if (delta > 0.6)
1012 ost->sync_opts = lrint(sync_ipts);
1015 case VSYNC_PASSTHROUGH:
1016 ost->sync_opts = lrint(sync_ipts);
/* Honor -frames limit and record nb0 history for the flush predictor. */
1023 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1024 nb0_frames = FFMIN(nb0_frames, nb_frames);
1026 memmove(ost->last_nb0_frames + 1,
1027 ost->last_nb0_frames,
1028 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1029 ost->last_nb0_frames[0] = nb0_frames;
1031 if (nb0_frames == 0 && ost->last_dropped) {
1033 av_log(NULL, AV_LOG_VERBOSE,
1034 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1035 ost->frame_number, ost->st->index, ost->last_frame->pts);
1037 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1038 if (nb_frames > dts_error_threshold * 30) {
1039 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1043 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1044 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1046 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1048 /* duplicates frame if needed */
1049 for (i = 0; i < nb_frames; i++) {
1050 AVFrame *in_picture;
1051 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame (duplication). */
1055 if (i < nb0_frames && ost->last_frame) {
1056 in_picture = ost->last_frame;
1058 in_picture = next_picture;
1063 in_picture->pts = ost->sync_opts;
1066 if (!check_recording_time(ost))
1068 if (ost->frame_number >= ost->max_frames)
1072 #if FF_API_LAVF_FMT_RAWPICTURE
/* Legacy rawvideo path: the AVPicture struct itself is muxed, no encode. */
1073 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1074 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1075 /* raw pictures are written as AVPicture structure to
1076 avoid any copies. We support temporarily the older
1078 if (in_picture->interlaced_frame)
1079 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1081 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1082 pkt.data = (uint8_t *)in_picture;
1083 pkt.size = sizeof(AVPicture);
1084 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1085 pkt.flags |= AV_PKT_FLAG_KEY;
1087 write_frame(s, &pkt, ost);
1091 int got_packet, forced_keyframe = 0;
/* Apply -top override when interlaced encoding flags are set. */
1094 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1095 ost->top_field_first >= 0)
1096 in_picture->top_field_first = !!ost->top_field_first;
1098 if (in_picture->interlaced_frame) {
1099 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1100 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1102 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1104 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1106 in_picture->quality = enc->global_quality;
1107 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source" flag. */
1109 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1110 in_picture->pts * av_q2d(enc->time_base) : NAN;
1111 if (ost->forced_kf_index < ost->forced_kf_count &&
1112 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1113 ost->forced_kf_index++;
1114 forced_keyframe = 1;
1115 } else if (ost->forced_keyframes_pexpr) {
1117 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1118 res = av_expr_eval(ost->forced_keyframes_pexpr,
1119 ost->forced_keyframes_expr_const_values, NULL);
1120 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1121 ost->forced_keyframes_expr_const_values[FKF_N],
1122 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1123 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1124 ost->forced_keyframes_expr_const_values[FKF_T],
1125 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1128 forced_keyframe = 1;
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1130 ost->forced_keyframes_expr_const_values[FKF_N];
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1132 ost->forced_keyframes_expr_const_values[FKF_T];
1133 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1136 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1137 } else if ( ost->forced_keyframes
1138 && !strncmp(ost->forced_keyframes, "source", 6)
1139 && in_picture->key_frame==1) {
1140 forced_keyframe = 1;
1143 if (forced_keyframe) {
1144 in_picture->pict_type = AV_PICTURE_TYPE_I;
1145 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1148 update_benchmark(NULL);
1150 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1151 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1152 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1153 enc->time_base.num, enc->time_base.den);
1156 ost->frames_encoded++;
1158 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1159 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1161 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1167 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1168 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1169 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1170 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Non-delaying encoders may omit pts; reconstruct it from sync_opts. */
1173 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1174 pkt.pts = ost->sync_opts;
1176 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1179 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1180 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1181 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1182 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1185 frame_size = pkt.size;
1186 write_frame(s, &pkt, ost);
1188 /* if two pass, output log */
1189 if (ost->logfile && enc->stats_out) {
1190 fprintf(ost->logfile, "%s", enc->stats_out);
1196 * For video, number of frames in == number of packets out.
1197 * But there may be reordering, so we can't throw away frames on encoder
1198 * flush, we need to limit them here, before they go into encoder.
1200 ost->frame_number++;
1202 if (vstats_filename && frame_size)
1203 do_video_stats(ost, frame_size);
/* Remember this frame so it can be re-encoded for future duplication. */
1206 if (!ost->last_frame)
1207 ost->last_frame = av_frame_alloc();
1208 av_frame_unref(ost->last_frame);
1209 if (next_picture && ost->last_frame)
1210 av_frame_ref(ost->last_frame, next_picture);
1212 av_frame_free(&ost->last_frame);
/* Convert a normalized mean-squared error d into PSNR in dB. */
1215 static double psnr(double d)
1217 return -10.0 * log10(d);
/* Append one line of per-frame statistics (-vstats) for a video stream:
 * frame number, quantizer, optional PSNR, frame size, pts time, bitrate
 * and picture type. The file-open error handling and enc assignment are
 * partially elided in this view. */
1220 static void do_video_stats(OutputStream *ost, int frame_size)
1222 AVCodecContext *enc;
1224 double ti1, bitrate, avg_bitrate;
1226 /* this is executed just the first time do_video_stats is called */
1228 vstats_file = fopen(vstats_filename, "w");
1236 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1237 frame_number = ost->st->nb_frames;
1238 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1239 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR from the accumulated squared error, normalized per pixel (8-bit). */
1241 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1242 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1244 fprintf(vstats_file,"f_size= %6d ", frame_size);
1245 /* compute pts value */
1246 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1250 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1251 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1252 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1253 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1254 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark a stream fully finished (encoder + muxer); the loop over all
 * streams of the file applies when shortest-stream handling ends the
 * whole file (the guard for that branch is elided in this view). */
1258 static void finish_output_stream(OutputStream *ost)
1260 OutputFile *of = output_files[ost->file_index];
1263 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1266 for (i = 0; i < of->ctx->nb_streams; i++)
1267 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Get and encode new output from any of the filtergraphs, without causing
 * activity on the inputs.
 *
 * @param flush when set, also drain EOF from the sinks and flush the
 *              video path with a NULL frame
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
    AVFrame *filtered_frame = NULL;
    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        filter = ost->filter->filter;
        /* lazily allocate the reusable frame that receives sink output */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        filtered_frame = ost->filtered_frame;
        double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
        /* pull without requesting more input from upstream filters */
        ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                            AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_WARNING,
                   "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
        } else if (flush && ret == AVERROR_EOF) {
            /* on EOF during a flush, push a NULL frame through the video path */
            if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
        if (ost->finished) {
            av_frame_unref(filtered_frame);
        if (filtered_frame->pts != AV_NOPTS_VALUE) {
            int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
            AVRational tb = enc->time_base;
            /* widen the time base so float_pts keeps sub-tick precision */
            int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
            tb.den <<= extra_bits;
            av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
            float_pts /= 1 << extra_bits;
            // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
            float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
            /* integer pts rescaled into the encoder time base, offset by start time */
            filtered_frame->pts =
                av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
        //if (ost->source_index >= 0)
        //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
        switch (filter->inputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            /* propagate SAR from the filter output unless the user forced one */
            if (!ost->frame_aspect_ratio.num)
                enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
            av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                   av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
            do_video_out(of->ctx, ost, filtered_frame, float_pts);
        case AVMEDIA_TYPE_AUDIO:
            /* the encoder cannot follow mid-stream channel count changes */
            if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                enc->channels != av_frame_get_channels(filtered_frame)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
            do_audio_out(of->ctx, ost, filtered_frame);
            // TODO support subtitle filters
        av_frame_unref(filtered_frame);
1372 static void print_final_stats(int64_t total_size)
1374 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1375 uint64_t subtitle_size = 0;
1376 uint64_t data_size = 0;
1377 float percent = -1.0;
1381 for (i = 0; i < nb_output_streams; i++) {
1382 OutputStream *ost = output_streams[i];
1383 switch (ost->enc_ctx->codec_type) {
1384 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1385 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1386 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1387 default: other_size += ost->data_size; break;
1389 extra_size += ost->enc_ctx->extradata_size;
1390 data_size += ost->data_size;
1391 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1392 != AV_CODEC_FLAG_PASS1)
1396 if (data_size && total_size>0 && total_size >= data_size)
1397 percent = 100.0 * (total_size - data_size) / data_size;
1399 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1400 video_size / 1024.0,
1401 audio_size / 1024.0,
1402 subtitle_size / 1024.0,
1403 other_size / 1024.0,
1404 extra_size / 1024.0);
1406 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1408 av_log(NULL, AV_LOG_INFO, "unknown");
1409 av_log(NULL, AV_LOG_INFO, "\n");
1411 /* print verbose per-stream stats */
1412 for (i = 0; i < nb_input_files; i++) {
1413 InputFile *f = input_files[i];
1414 uint64_t total_packets = 0, total_size = 0;
1416 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1417 i, f->ctx->filename);
1419 for (j = 0; j < f->nb_streams; j++) {
1420 InputStream *ist = input_streams[f->ist_index + j];
1421 enum AVMediaType type = ist->dec_ctx->codec_type;
1423 total_size += ist->data_size;
1424 total_packets += ist->nb_packets;
1426 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1427 i, j, media_type_string(type));
1428 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1429 ist->nb_packets, ist->data_size);
1431 if (ist->decoding_needed) {
1432 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1433 ist->frames_decoded);
1434 if (type == AVMEDIA_TYPE_AUDIO)
1435 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1436 av_log(NULL, AV_LOG_VERBOSE, "; ");
1439 av_log(NULL, AV_LOG_VERBOSE, "\n");
1442 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1443 total_packets, total_size);
1446 for (i = 0; i < nb_output_files; i++) {
1447 OutputFile *of = output_files[i];
1448 uint64_t total_packets = 0, total_size = 0;
1450 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1451 i, of->ctx->filename);
1453 for (j = 0; j < of->ctx->nb_streams; j++) {
1454 OutputStream *ost = output_streams[of->ost_index + j];
1455 enum AVMediaType type = ost->enc_ctx->codec_type;
1457 total_size += ost->data_size;
1458 total_packets += ost->packets_written;
1460 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1461 i, j, media_type_string(type));
1462 if (ost->encoding_needed) {
1463 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1464 ost->frames_encoded);
1465 if (type == AVMEDIA_TYPE_AUDIO)
1466 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1467 av_log(NULL, AV_LOG_VERBOSE, "; ");
1470 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1471 ost->packets_written, ost->data_size);
1473 av_log(NULL, AV_LOG_VERBOSE, "\n");
1476 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1477 total_packets, total_size);
1479 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1480 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1482 av_log(NULL, AV_LOG_WARNING, "\n");
1484 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Build and emit the periodic progress line (frame/fps/q/size/time/bitrate/
 * speed) to stderr or the log, and mirror the same data in key=value form
 * to the -progress output (progress_avio). Rate-limited to one update per
 * 500ms unless this is the final report, which also triggers
 * print_final_stats().
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    AVBPrint buf_script;
    AVFormatContext *oc;
    AVCodecContext *enc;
    int frame_number, vid, i;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    /* nothing to do if no report target is active */
    if (!print_stats && !is_last_report && !progress_avio)
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        /* rate-limit intermediate reports to one every 500ms */
        if ((cur_time - last_time) < 500000)
        last_time = cur_time;
    /* wall-clock seconds elapsed since transcoding started */
    t = (cur_time-timer_start) / 1000000.0;
    oc = output_files[0]->ctx;
    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;
        /* additional video streams after the first only report q */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        /* the first video stream drives the frame/fps display */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* one decimal place only while fps is still in single digits */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
            /* compact log2 QP histogram, one hex digit per bucket */
            for (j = 0; j < 32; j++)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    /* the final report averages over all frames; periodic
                     * reports use the per-frame error */
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        nb_frames_drop += ost->last_dropped;
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);
    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        /* with -stats forced on, print even when logging is quieter than INFO */
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        /* drop the trailing NUL that AVBPrint accounts for in size */
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        print_final_stats(total_size);
/*
 * Drain every active audio/video encoder at end of stream: repeatedly call
 * the encoder with a NULL frame and mux any packets it still produces.
 * Streams that are stream-copied, raw PCM, or rawvideo-to-RAWPICTURE have
 * nothing buffered and are skipped.
 */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;
        if (!ost->encoding_needed)
        /* raw PCM-style audio encoders (frame_size <= 1) buffer nothing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            /* pick the media-specific encode entry point */
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                av_init_packet(&pkt);
                update_benchmark(NULL);
                /* NULL frame asks the encoder to emit buffered packets */
                ret = encode(enc, &pkt, NULL, &got_packet);
                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                /* two-pass stats must keep flowing to the log file */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                if (ost->finished & MUXER_FINISHED) {
                    av_packet_unref(&pkt);
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
/*
 * Check whether a packet from ist should be written into ost at this time
 * (stream-copy path): the output must be fed by exactly this input stream
 * and the packet must not precede the requested output start time.
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
    /* wrong source stream for this output */
    if (ost->source_index != ist_index)
    /* before -ss output start: drop */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding:
 * rescale its timestamps into the output time base, honour start/recording
 * time limits, optionally run the bitstream through av_parser_change(),
 * and hand the result to write_frame().
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    av_init_packet(&opkt);
    /* skip leading non-keyframes unless the user asked to keep them */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
    /* drop packets that precede the effective output start */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
    /* past the per-output recording time (-t): finish the stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
    /* past the per-input recording window: finish the stream as well */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        opkt.pts = AV_NOPTS_VALUE;
    /* with no packet DTS, fall back to the tracked input stream DTS */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;
    /* audio: rescale against the sample clock to avoid rounding drift */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;
    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_VC1
        int ret = av_parser_change(ost->parser, ost->st->codec,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
            /* parser allocated new data: give the packet ownership of it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    av_copy_packet_side_data(&opkt, pkt);
#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder did not report a channel layout, derive a default one
 * from the channel count (capped by -guess_layout_max) and store it on
 * the decoder context, warning the user about the guess.
 */
int guess_input_channel_layout(InputStream *ist)
    AVCodecContext *dec = ist->dec_ctx;
    if (!dec->channel_layout) {
        char layout_name[256];
        /* above the user-set cap: refuse to guess */
        if (dec->channels > ist->guess_layout_max)
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record decode success/failure statistics, and when -xerror is set,
 * abort on hard decode errors or on frames the decoder flagged as corrupt.
 * ist may be NULL (e.g. for subtitles), in which case only the counters
 * and the hard-error check apply.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;
    if (ret < 0 && exit_on_error)
    if (exit_on_error && *got_output && ist) {
        if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Decode one audio packet, reconstruct a usable pts for the decoded frame,
 * reconfigure the filtergraphs if the audio parameters (rate/format/
 * channels/layout) changed mid-stream, and push the frame into every
 * filter fed by this input stream.
 *
 * @return the decoder's return value, or <0 on a downstream error
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;
    /* lazily allocate the persistent decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    update_benchmark(NULL);
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    check_decode_result(ist, got_output, ret);
    if (!*got_output || ret < 0)
    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    /* detect mid-stream changes that require filtergraph reconfiguration */
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];
        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
        decoded_frame->channel_layout = avctx->channel_layout;
        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);
        /* remember the new parameters, then rebuild the affected graphs */
        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;
        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb   = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    /* consume the packet pts so multi-frame packets don't reuse it */
    pkt->pts           = AV_NOPTS_VALUE;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    for (i = 0; i < ist->nb_filters; i++) {
        /* all but the last filter get a reference; the last one may take
           the frame itself */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
    decoded_frame->pts = AV_NOPTS_VALUE;
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/*
 * Decode one video packet, derive a best-effort timestamp, retrieve frames
 * from hardware decoders when needed, reconfigure filtergraphs on mid-stream
 * size/format changes, and push the frame into every filter fed by this
 * input stream.
 *
 * @return the decoder's return value, or <0 on a downstream error
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    AVRational *frame_sample_aspect;
    /* lazily allocate the persistent decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    /* feed the tracked stream DTS to the decoder in stream time base */
    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    update_benchmark(NULL);
    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    // The following line may be required in some cases where there is no parser
    // or the parser does not set has_b_frames correctly
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "has_b_frames is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codec->has_b_frames);
    check_decode_result(ist, got_output, ret);
    /* sanity-check the decoded frame against the context parameters */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
    if (!*got_output || ret < 0)
    /* honour a user-forced field order (-top) */
    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;
    ist->frames_decoded++;
    /* copy hardware-decoded surfaces back to system memory when required */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    if(best_effort_timestamp != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    /* fall back to the container's aspect ratio when the frame has none */
    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    /* detect mid-stream size/format changes that require graph rebuilds */
    resample_changed = ist->resample_width   != decoded_frame->width ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;
        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;
        /* all but the last filter get a reference; the last one may take
           the frame itself */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (ret == AVERROR_EOF) {
            ret = 0; /* ignore */
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally clamp its display duration against
 * the next subtitle (-fix_sub_duration), feed sub2video, and re-encode it
 * into every matching subtitle output stream.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);
    check_decode_result(NULL, got_output, ret);
    if (ret < 0 || !*got_output) {
        sub2video_flush(ist);
    if (ist->fix_sub_duration) {
        /* shorten the previous subtitle if this one starts before it ends */
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
        /* emit the previous (now finalized) subtitle, buffer the current one */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
    sub2video_update(ist, &subtitle);
    if (!subtitle.num_rects)
    ist->frames_decoded++;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
    avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filter fed by this input stream by pushing a NULL
 * frame into each buffer source.
 */
static int send_filter_eof(InputStream *ist)
    for (i = 0; i < ist->nb_filters; i++) {
        /* a NULL frame marks end-of-stream on a buffersrc */
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2260 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2261 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2267 if (!ist->saw_first_ts) {
2268 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2270 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2271 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2272 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2274 ist->saw_first_ts = 1;
2277 if (ist->next_dts == AV_NOPTS_VALUE)
2278 ist->next_dts = ist->dts;
2279 if (ist->next_pts == AV_NOPTS_VALUE)
2280 ist->next_pts = ist->pts;
2284 av_init_packet(&avpkt);
2292 if (pkt->dts != AV_NOPTS_VALUE) {
2293 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2294 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2295 ist->next_pts = ist->pts = ist->dts;
2298 // while we have more to decode or while the decoder did output something on EOF
2299 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2303 ist->pts = ist->next_pts;
2304 ist->dts = ist->next_dts;
2306 if (avpkt.size && avpkt.size != pkt->size &&
2307 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2308 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2309 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2310 ist->showed_multi_packet_warning = 1;
2313 switch (ist->dec_ctx->codec_type) {
2314 case AVMEDIA_TYPE_AUDIO:
2315 ret = decode_audio (ist, &avpkt, &got_output);
2317 case AVMEDIA_TYPE_VIDEO:
2318 ret = decode_video (ist, &avpkt, &got_output);
2319 if (avpkt.duration) {
2320 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2321 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2322 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2323 duration = ((int64_t)AV_TIME_BASE *
2324 ist->dec_ctx->framerate.den * ticks) /
2325 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2329 if(ist->dts != AV_NOPTS_VALUE && duration) {
2330 ist->next_dts += duration;
2332 ist->next_dts = AV_NOPTS_VALUE;
2335 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2337 case AVMEDIA_TYPE_SUBTITLE:
2338 ret = transcode_subtitles(ist, &avpkt, &got_output);
2345 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2346 ist->file_index, ist->st->index, av_err2str(ret));
2353 avpkt.pts= AV_NOPTS_VALUE;
2355 // touch data and size only if not EOF
2357 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2365 if (got_output && !pkt)
2369 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2370 /* except when looping we need to flush but not to send an EOF */
2371 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2372 int ret = send_filter_eof(ist);
2374 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2379 /* handle stream copy */
2380 if (!ist->decoding_needed) {
2381 ist->dts = ist->next_dts;
2382 switch (ist->dec_ctx->codec_type) {
2383 case AVMEDIA_TYPE_AUDIO:
2384 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2385 ist->dec_ctx->sample_rate;
2387 case AVMEDIA_TYPE_VIDEO:
2388 if (ist->framerate.num) {
2389 // TODO: Remove work-around for c99-to-c89 issue 7
2390 AVRational time_base_q = AV_TIME_BASE_Q;
2391 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2392 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2393 } else if (pkt->duration) {
2394 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2395 } else if(ist->dec_ctx->framerate.num != 0) {
2396 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2397 ist->next_dts += ((int64_t)AV_TIME_BASE *
2398 ist->dec_ctx->framerate.den * ticks) /
2399 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2403 ist->pts = ist->dts;
2404 ist->next_pts = ist->next_dts;
2406 for (i = 0; pkt && i < nb_output_streams; i++) {
2407 OutputStream *ost = output_streams[i];
2409 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2412 do_streamcopy(ist, ost, pkt);
2418 static void print_sdp(void)
/* Build one combined SDP description covering every output file muxed by the
 * "rtp" muxer, then either print it to stdout or write it to the file named
 * by the global sdp_filename.
 * NOTE(review): this excerpt elides several lines (declarations of i, j and
 * sdp[], the allocation-failure check, brace closures); comments below cover
 * only the visible code. */
2423 AVIOContext *sdp_pb;
2424 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP output contexts; j counts how many were found. */
2428 for (i = 0, j = 0; i < nb_output_files; i++) {
2429 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2430 avc[j] = output_files[i]->ctx;
/* Render the SDP for the j collected RTP contexts into sdp[]. */
2438 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file option given: dump the SDP to stdout instead of a file. */
2440 if (!sdp_filename) {
2441 printf("SDP:\n%s\n", sdp);
2444 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2445 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2447 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2448 avio_closep(&sdp_pb);
/* sdp_filename is owned by this module; release it after writing once. */
2449 av_freep(&sdp_filename);
2457 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2460 for (i = 0; hwaccels[i].name; i++)
2461 if (hwaccels[i].pix_fmt == pix_fmt)
2462 return &hwaccels[i];
2466 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: scan the decoder's list of candidate
 * pixel formats and select the first hardware-accelerated one whose HWAccel
 * entry matches the user's -hwaccel request and initializes successfully.
 * NOTE(review): the software-format fallback, the `continue` statements and
 * the final return are elided in this excerpt. */
2468 InputStream *ist = s->opaque;
2469 const enum AVPixelFormat *p;
/* The candidate list is terminated by -1 (AV_PIX_FMT_NONE). */
2472 for (p = pix_fmts; *p != -1; p++) {
2473 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2474 const HWAccel *hwaccel;
/* Plain software formats are not handled here. */
2476 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2479 hwaccel = get_hwaccel(*p);
/* Reject hwaccels that conflict with one already active on this stream,
 * or that differ from the explicitly requested one. */
2481 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2482 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2485 ret = hwaccel->init(s);
/* Init failure: fatal only when this exact hwaccel was explicitly
 * requested; with HWACCEL_AUTO the scan simply continues. */
2487 if (ist->hwaccel_id == hwaccel->id) {
2488 av_log(NULL, AV_LOG_FATAL,
2489 "%s hwaccel requested for input stream #%d:%d, "
2490 "but cannot be initialized.\n", hwaccel->name,
2491 ist->file_index, ist->st->index);
2492 return AV_PIX_FMT_NONE;
/* Success: remember the chosen hwaccel and its pixel format so that
 * get_buffer() can route frame allocation to it. */
2496 ist->active_hwaccel_id = hwaccel->id;
2497 ist->hwaccel_pix_fmt = *p;
2504 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2506 InputStream *ist = s->opaque;
2508 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2509 return ist->hwaccel_get_buffer(s, frame, flags);
2511 return avcodec_default_get_buffer2(s, frame, flags);
2514 static int init_input_stream(int ist_index, char *error, int error_len)
/* Open the decoder for one input stream (when decoding is needed) and reset
 * its timestamp bookkeeping. On failure a human-readable message is written
 * into error[] and a negative AVERROR code is returned.
 * NOTE(review): the `if (!codec)` guard, `ret` declaration, some returns and
 * brace closures are elided in this excerpt. */
2517 InputStream *ist = input_streams[ist_index];
2519 if (ist->decoding_needed) {
2520 AVCodec *codec = ist->dec;
/* (no-decoder path:) report which codec could not be found. */
2522 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2523 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2524 return AVERROR(EINVAL);
/* Hook the per-stream callbacks: opaque carries the InputStream so
 * get_format/get_buffer can reach hwaccel state. */
2527 ist->dec_ctx->opaque = ist;
2528 ist->dec_ctx->get_format = get_format;
2529 ist->dec_ctx->get_buffer2 = get_buffer;
2530 ist->dec_ctx->thread_safe_callbacks = 1;
/* Decoded frames are reference-counted so filters can hold on to them. */
2532 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2533 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2534 (ist->decoding_needed & DECODING_FOR_OST)) {
2535 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2536 if (ist->decoding_needed & DECODING_FOR_FILTER)
2537 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user chose one. */
2540 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2541 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2542 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2543 if (ret == AVERROR_EXPERIMENTAL)
2544 abort_codec_experimental(codec, 0);
2546 snprintf(error, error_len,
2547 "Error while opening decoder for input stream "
2549 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not recognized — fatal. */
2552 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet arrives. */
2555 ist->next_pts = AV_NOPTS_VALUE;
2556 ist->next_dts = AV_NOPTS_VALUE;
2561 static InputStream *get_input_stream(OutputStream *ost)
2563 if (ost->source_index >= 0)
2564 return input_streams[ost->source_index];
/**
 * qsort() comparison callback for arrays of int64_t.
 *
 * Returns -1, 0 or +1 according to the ordering of *a and *b. The
 * subtraction-free form (equivalent to FFDIFFSIGN) avoids signed overflow
 * that a plain `*a - *b` could trigger.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2573 static int init_output_stream(OutputStream *ost, char *error, int error_len)
/* Open the encoder for one output stream (when encoding is needed), copy
 * encoder-produced side data and the time base into the AVStream, and apply
 * any remaining user codec options. Writes a message into error[] and
 * returns a negative AVERROR on failure.
 * NOTE(review): several lines (ret/ist declarations, error returns, brace
 * closures, #if guards) are elided in this excerpt. */
2577 if (ost->encoding_needed) {
2578 AVCodec *codec = ost->enc;
2579 AVCodecContext *dec = NULL;
2582 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's ASS subtitle header so the encoder can reuse it. */
2584 if (dec && dec->subtitle_header) {
2585 /* ASS code assumes this buffer is null terminated so add extra byte. */
2586 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2587 if (!ost->enc_ctx->subtitle_header)
2588 return AVERROR(ENOMEM);
2589 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2590 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default thread count to auto unless the user set one explicitly. */
2592 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2593 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when neither -b nor -ab was given. */
2594 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2596 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2597 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2598 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2600 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2601 if (ret == AVERROR_EXPERIMENTAL)
2602 abort_codec_experimental(codec, 1);
2603 snprintf(error, error_len,
2604 "Error while opening encoder for output stream #%d:%d - "
2605 "maybe incorrect parameters such as bit_rate, rate, width or height",
2606 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly enc_ctx->frame_size samples. */
2609 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2610 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2611 av_buffersink_set_frame_size(ost->filter->filter,
2612 ost->enc_ctx->frame_size);
2613 assert_avoptions(ost->encoder_opts);
/* Heuristic sanity check: values < 1000 almost always mean the user
 * typed kbit/s where bit/s was expected. */
2614 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2615 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2616 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the opened encoder context into the (deprecated) AVStream codec
 * context used by the muxer. */
2618 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2620 av_log(NULL, AV_LOG_FATAL,
2621 "Error initializing the output stream codec context.\n");
/* Deep-copy any global side data the encoder produced into the stream. */
2625 if (ost->enc_ctx->nb_coded_side_data) {
2628 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2629 sizeof(*ost->st->side_data));
2630 if (!ost->st->side_data)
2631 return AVERROR(ENOMEM);
2633 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2634 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2635 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2637 sd_dst->data = av_malloc(sd_src->size);
2639 return AVERROR(ENOMEM);
2640 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2641 sd_dst->size = sd_src->size;
2642 sd_dst->type = sd_src->type;
2643 ost->st->nb_side_data++;
2647 // copy timebase while removing common factors
2648 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2649 ost->st->codec->codec= ost->enc_ctx->codec;
/* (stream-copy branch:) only apply user options; the encoder is not opened. */
2651 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2653 av_log(NULL, AV_LOG_FATAL,
2654 "Error setting up codec context options.\n");
2657 // copy timebase while removing common factors
2658 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2664 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2665 AVCodecContext *avctx)
/* Parse a -force_key_frames specification: a comma-separated list of
 * timestamps, where an entry of the form "chapters[+offset]" expands to one
 * keyframe per chapter of the output file. The resulting times, rescaled to
 * the encoder time base, are sorted and stored in ost->forced_kf_pts/count.
 * NOTE(review): declarations of p/t/pts and some loop plumbing (comma
 * counting body, `p = next` advance, brace closures) are elided here. */
2668 int n = 1, i, size, index = 0;
/* First pass: count commas to size the pts array (n entries). */
2671 for (p = kf; *p; p++)
2675 pts = av_malloc_array(size, sizeof(*pts));
2677 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: split on ',' and convert each entry. */
2682 for (i = 0; i < n; i++) {
2683 char *next = strchr(p, ',');
2688 if (!memcmp(p, "chapters", 8)) {
2690 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by one slot per chapter (the "chapters" token itself
 * already accounted for one slot); guard the size addition against
 * integer overflow before reallocating. */
2693 if (avf->nb_chapters > INT_MAX - size ||
2694 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2696 av_log(NULL, AV_LOG_FATAL,
2697 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters" shifts every chapter time. */
2700 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2701 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2703 for (j = 0; j < avf->nb_chapters; j++) {
2704 AVChapter *c = avf->chapters[j];
2705 av_assert1(index < size);
2706 pts[index++] = av_rescale_q(c->start, c->time_base,
2707 avctx->time_base) + t;
/* Plain timestamp entry. */
2712 t = parse_time_or_die("force_key_frames", p, 1);
2713 av_assert1(index < size);
2714 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Every slot must have been filled; sort so the encoder can scan linearly. */
2721 av_assert0(index == size);
2722 qsort(pts, size, sizeof(*pts), compare_int64);
2723 ost->forced_kf_count = size;
2724 ost->forced_kf_pts = pts;
2727 static void report_new_stream(int input_index, AVPacket *pkt)
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the input file was opened, so it will not be mapped.
 * NOTE(review): the early `return;` for already-reported indexes and the
 * braces are elided in this excerpt. */
2729 InputFile *file = input_files[input_index];
2730 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Indexes below nb_streams_warn were already reported — stay quiet. */
2732 if (pkt->stream_index < file->nb_streams_warn)
2734 av_log(file->ctx, AV_LOG_WARNING,
2735 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2736 av_get_media_type_string(st->codec->codec_type),
2737 input_index, pkt->stream_index,
2738 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest index reported so far. */
2739 file->nb_streams_warn = pkt->stream_index + 1;
2742 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Populate the output stream's "encoder" metadata tag with the libavcodec
 * ident plus encoder name, unless the user already set one or requested
 * bitexact output (in which case only "Lavc <name>" is written).
 * NOTE(review): early `return`s and some braces are elided here. */
2744 AVDictionaryEntry *e;
2746 uint8_t *encoder_string;
2747 int encoder_string_len;
2748 int format_flags = 0;
2749 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
2751 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the muxer's -fflags string to detect the bitexact flag. */
2754 e = av_dict_get(of->opts, "fflags", NULL, 0);
2756 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2759 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Likewise for the encoder's -flags string. */
2761 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2763 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2766 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* sizeof(LIBAVCODEC_IDENT) already includes the NUL; +2 covers the
 * separating space and the encoder name's terminator. */
2769 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2770 encoder_string = av_mallocz(encoder_string_len);
2771 if (!encoder_string)
/* Bitexact output must not embed version-dependent strings. */
2774 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2775 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2777 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2778 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
2779 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2780 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2783 static int transcode_init(void)
/* One-time setup before the transcode loop: bind complex-filtergraph outputs
 * to source streams, compute encoding/stream-copy parameters for every
 * output stream, open all encoders and decoders, write output headers and
 * print the stream mapping.
 * NOTE(review): this excerpt elides many lines (declarations of ist/ost,
 * `continue`s, gotos, brace closures, #if blocks); comments below annotate
 * only the visible code. */
2785 int ret = 0, i, j, k;
2786 AVFormatContext *oc;
2789 char error[1024] = {0};
/* Give complex-filtergraph outputs a nominal source stream index (the input
 * of their single-input graph) so later code can treat them uniformly. */
2792 for (i = 0; i < nb_filtergraphs; i++) {
2793 FilterGraph *fg = filtergraphs[i];
2794 for (j = 0; j < fg->nb_outputs; j++) {
2795 OutputFilter *ofilter = fg->outputs[j];
2796 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2798 if (fg->nb_inputs != 1)
2800 for (k = nb_input_streams-1; k >= 0 ; k--)
2801 if (fg->inputs[0]->ist == input_streams[k])
2803 ofilter->ost->source_index = k;
2807 /* init framerate emulation */
2808 for (i = 0; i < nb_input_files; i++) {
2809 InputFile *ifile = input_files[i];
2810 if (ifile->rate_emu)
2811 for (j = 0; j < ifile->nb_streams; j++)
2812 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2815 /* for each output stream, we compute the right encoding parameters */
2816 for (i = 0; i < nb_output_streams; i++) {
2817 AVCodecContext *enc_ctx;
2818 AVCodecContext *dec_ctx = NULL;
2819 ost = output_streams[i];
2820 oc = output_files[ost->file_index]->ctx;
2821 ist = get_input_stream(ost);
2823 if (ost->attachment_filename)
/* Stream copy writes directly into the muxer's codec context. */
2826 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2829 dec_ctx = ist->dec_ctx;
2831 ost->st->disposition = ist->st->disposition;
2832 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2833 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only A/V stream of its type in the file, mark it as
 * the default disposition. */
2835 for (j=0; j<oc->nb_streams; j++) {
2836 AVStream *st = oc->streams[j];
2837 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2840 if (j == oc->nb_streams)
2841 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2842 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2845 if (ost->stream_copy) {
2847 uint64_t extra_size;
2849 av_assert0(ist && !ost->filter);
/* Guard the extradata size + padding against int overflow. */
2851 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2853 if (extra_size > INT_MAX) {
2854 return AVERROR(EINVAL);
2857 /* if stream_copy is selected, no need to decode or encode */
2858 enc_ctx->codec_id = dec_ctx->codec_id;
2859 enc_ctx->codec_type = dec_ctx->codec_type;
/* Carry the input codec tag over only when the output container can
 * represent it (or has no tag table of its own). */
2861 if (!enc_ctx->codec_tag) {
2862 unsigned int codec_tag;
2863 if (!oc->oformat->codec_tag ||
2864 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2865 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2866 enc_ctx->codec_tag = dec_ctx->codec_tag;
2869 enc_ctx->bit_rate = dec_ctx->bit_rate;
2870 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2871 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2872 enc_ctx->field_order = dec_ctx->field_order;
2873 if (dec_ctx->extradata_size) {
2874 enc_ctx->extradata = av_mallocz(extra_size);
2875 if (!enc_ctx->extradata) {
2876 return AVERROR(ENOMEM);
2878 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2880 enc_ctx->extradata_size= dec_ctx->extradata_size;
2881 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2883 enc_ctx->time_base = ist->st->time_base;
2885 * Avi is a special case here because it supports variable fps but
2886 * having the fps and timebase differe significantly adds quite some
2889 if(!strcmp(oc->oformat->name, "avi")) {
/* Heuristics (tunable via -copytb): pick a time base close to the real
 * frame rate when the input time base is implausibly fine-grained. */
2890 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2891 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2892 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2893 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2895 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2896 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2897 enc_ctx->ticks_per_frame = 2;
2898 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2899 && av_q2d(ist->st->time_base) < 1.0/500
2901 enc_ctx->time_base = dec_ctx->time_base;
2902 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2903 enc_ctx->time_base.den *= 2;
2904 enc_ctx->ticks_per_frame = 2;
/* Non-AVI, fixed-fps containers (excluding the mov/mp4 family) may also
 * prefer the codec time base over the stream one. */
2906 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2907 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2908 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2909 && strcmp(oc->oformat->name, "f4v")
2911 if( copy_tb<0 && dec_ctx->time_base.den
2912 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2913 && av_q2d(ist->st->time_base) < 1.0/500
2915 enc_ctx->time_base = dec_ctx->time_base;
2916 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the codec time base when it looks like a
 * sane frame duration (between 1/121 s and 1 s). */
2919 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2920 && dec_ctx->time_base.num < dec_ctx->time_base.den
2921 && dec_ctx->time_base.num > 0
2922 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2923 enc_ctx->time_base = dec_ctx->time_base;
2926 if (!ost->frame_rate.num)
2927 ost->frame_rate = ist->framerate;
2928 if(ost->frame_rate.num)
2929 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2931 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2932 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy per-stream side data from input to output (skipping the
 * display matrix when a rotation override was requested). */
2934 if (ist->st->nb_side_data) {
2935 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2936 sizeof(*ist->st->side_data));
2937 if (!ost->st->side_data)
2938 return AVERROR(ENOMEM);
2940 ost->st->nb_side_data = 0;
2941 for (j = 0; j < ist->st->nb_side_data; j++) {
2942 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2943 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2945 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2948 sd_dst->data = av_malloc(sd_src->size);
2950 return AVERROR(ENOMEM);
2951 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2952 sd_dst->size = sd_src->size;
2953 sd_dst->type = sd_src->type;
2954 ost->st->nb_side_data++;
2958 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copying for stream copy. */
2960 switch (enc_ctx->codec_type) {
2961 case AVMEDIA_TYPE_AUDIO:
2962 if (audio_volume != 256) {
2963 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2966 enc_ctx->channel_layout = dec_ctx->channel_layout;
2967 enc_ctx->sample_rate = dec_ctx->sample_rate;
2968 enc_ctx->channels = dec_ctx->channels;
2969 enc_ctx->frame_size = dec_ctx->frame_size;
2970 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2971 enc_ctx->block_align = dec_ctx->block_align;
2972 enc_ctx->initial_padding = dec_ctx->delay;
2973 enc_ctx->profile = dec_ctx->profile;
2974 #if FF_API_AUDIOENC_DELAY
2975 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC-3 block_align values from some demuxers are bogus; clear them
 * so the muxer recomputes. */
2977 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2978 enc_ctx->block_align= 0;
2979 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2980 enc_ctx->block_align= 0;
2982 case AVMEDIA_TYPE_VIDEO:
2983 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2984 enc_ctx->width = dec_ctx->width;
2985 enc_ctx->height = dec_ctx->height;
2986 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2987 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2989 av_mul_q(ost->frame_aspect_ratio,
2990 (AVRational){ enc_ctx->height, enc_ctx->width });
2991 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2992 "with stream copy may produce invalid files\n");
2994 else if (ist->st->sample_aspect_ratio.num)
2995 sar = ist->st->sample_aspect_ratio;
2997 sar = dec_ctx->sample_aspect_ratio;
2998 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2999 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3000 ost->st->r_frame_rate = ist->st->r_frame_rate;
3002 case AVMEDIA_TYPE_SUBTITLE:
3003 enc_ctx->width = dec_ctx->width;
3004 enc_ctx->height = dec_ctx->height;
3006 case AVMEDIA_TYPE_UNKNOWN:
3007 case AVMEDIA_TYPE_DATA:
3008 case AVMEDIA_TYPE_ATTACHMENT:
/* --- encoding branch --- */
3015 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3017 /* should only happen when a default codec is not present. */
3018 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3019 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3020 ret = AVERROR(EINVAL);
3024 set_encoder_id(output_files[ost->file_index], ost);
/* QSV has its own transcode path; skip normal filtergraph setup. */
3027 if (qsv_transcode_init(ost))
/* Streams without an explicit filtergraph get a simple one built here. */
3032 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3033 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3035 fg = init_simple_filtergraph(ist, ost);
3036 if (configure_filtergraph(fg)) {
3037 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate: CLI option, filter output, input -r, input stream
 * rate, then a 25 fps fallback — in that order of preference. */
3042 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3043 if (!ost->frame_rate.num)
3044 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3045 if (ist && !ost->frame_rate.num)
3046 ost->frame_rate = ist->framerate;
3047 if (ist && !ost->frame_rate.num)
3048 ost->frame_rate = ist->st->r_frame_rate;
3049 if (ist && !ost->frame_rate.num) {
3050 ost->frame_rate = (AVRational){25, 1};
3051 av_log(NULL, AV_LOG_WARNING,
3053 "about the input framerate is available. Falling "
3054 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3055 "if you want a different framerate.\n",
3056 ost->file_index, ost->index);
3058 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the encoder's supported frame rates unless -force_fps. */
3059 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3060 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3061 ost->frame_rate = ost->enc->supported_framerates[idx];
3063 // reduce frame rate for mpeg4 to be within the spec limits
3064 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3065 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3066 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Per-media-type encoder parameters, taken from the filtergraph output. */
3070 switch (enc_ctx->codec_type) {
3071 case AVMEDIA_TYPE_AUDIO:
3072 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3073 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3074 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3075 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3076 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3078 case AVMEDIA_TYPE_VIDEO:
3079 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3080 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3081 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3082 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3083 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3084 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3085 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE; rescale them to
 * the now-final encoder time base. */
3087 for (j = 0; j < ost->forced_kf_count; j++)
3088 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3090 enc_ctx->time_base);
3092 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3093 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3094 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3095 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3096 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3097 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3098 if (!strncmp(ost->enc->name, "libx264", 7) &&
3099 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3100 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3101 av_log(NULL, AV_LOG_WARNING,
3102 "No pixel format specified, %s for H.264 encoding chosen.\n"
3103 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3104 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3105 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3106 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3107 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3108 av_log(NULL, AV_LOG_WARNING,
3109 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3110 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3111 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3112 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3114 ost->st->avg_frame_rate = ost->frame_rate;
/* bits_per_raw_sample is only meaningful when geometry/format match the
 * decoder; otherwise use the global frame_bits_per_raw_sample option. */
3117 enc_ctx->width != dec_ctx->width ||
3118 enc_ctx->height != dec_ctx->height ||
3119 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3120 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: "expr:" form compiles an expression, "source" keeps
 * input keyframes, anything else is a static timestamp list. */
3123 if (ost->forced_keyframes) {
3124 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3125 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3126 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3128 av_log(NULL, AV_LOG_ERROR,
3129 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3132 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3133 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3137 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3138 // parse it only for static kf timings
3139 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3140 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3144 case AVMEDIA_TYPE_SUBTITLE:
3145 enc_ctx->time_base = (AVRational){1, 1000};
3146 if (!enc_ctx->width) {
3147 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3148 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3151 case AVMEDIA_TYPE_DATA:
/* Parse the -disposition flag string via a local throwaway AVOption table. */
3159 if (ost->disposition) {
3160 static const AVOption opts[] = {
3161 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3162 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3163 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3164 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3165 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3166 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3167 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3168 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3169 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3170 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3171 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3172 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3173 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3174 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3177 static const AVClass class = {
3179 .item_name = av_default_item_name,
3181 .version = LIBAVUTIL_VERSION_INT,
3183 const AVClass *pclass = &class;
3185 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3191 /* open each encoder */
3192 for (i = 0; i < nb_output_streams; i++) {
3193 ret = init_output_stream(output_streams[i], error, sizeof(error));
3198 /* init input streams */
3199 for (i = 0; i < nb_input_streams; i++)
3200 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close every encoder opened so far before bailing out. */
3201 for (i = 0; i < nb_output_streams; i++) {
3202 ost = output_streams[i];
3203 avcodec_close(ost->enc_ctx);
3208 /* discard unused programs */
3209 for (i = 0; i < nb_input_files; i++) {
3210 InputFile *ifile = input_files[i];
3211 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3212 AVProgram *p = ifile->ctx->programs[j];
3213 int discard = AVDISCARD_ALL;
/* Keep a program if at least one of its streams is in use. */
3215 for (k = 0; k < p->nb_stream_indexes; k++)
3216 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3217 discard = AVDISCARD_DEFAULT;
3220 p->discard = discard;
3224 /* open files and write file headers */
3225 for (i = 0; i < nb_output_files; i++) {
3226 oc = output_files[i]->ctx;
3227 oc->interrupt_callback = int_cb;
3228 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3229 snprintf(error, sizeof(error),
3230 "Could not write header for output file #%d "
3231 "(incorrect codec parameters ?): %s",
3232 i, av_err2str(ret));
3233 ret = AVERROR(EINVAL);
3236 // assert_avoptions(output_files[i]->opts);
3237 if (strcmp(oc->oformat->name, "rtp")) {
3243 /* dump the file output parameters - cannot be done before in case
3245 for (i = 0; i < nb_output_files; i++) {
3246 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3249 /* dump the stream mapping */
3250 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3251 for (i = 0; i < nb_input_streams; i++) {
3252 ist = input_streams[i];
3254 for (j = 0; j < ist->nb_filters; j++) {
3255 if (ist->filters[j]->graph->graph_desc) {
3256 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3257 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3258 ist->filters[j]->name);
3259 if (nb_filtergraphs > 1)
3260 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3261 av_log(NULL, AV_LOG_INFO, "\n");
3266 for (i = 0; i < nb_output_streams; i++) {
3267 ost = output_streams[i];
3269 if (ost->attachment_filename) {
3270 /* an attached file */
3271 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3272 ost->attachment_filename, ost->file_index, ost->index);
3276 if (ost->filter && ost->filter->graph->graph_desc) {
3277 /* output from a complex graph */
3278 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3279 if (nb_filtergraphs > 1)
3280 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3282 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3283 ost->index, ost->enc ? ost->enc->name : "?");
3287 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3288 input_streams[ost->source_index]->file_index,
3289 input_streams[ost->source_index]->st->index,
3292 if (ost->sync_ist != input_streams[ost->source_index])
3293 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3294 ost->sync_ist->file_index,
3295 ost->sync_ist->st->index);
3296 if (ost->stream_copy)
3297 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Print "(codec (decoder) -> codec (encoder))", using "native" when the
 * implementation name matches the codec descriptor name. */
3299 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3300 const AVCodec *out_codec = ost->enc;
3301 const char *decoder_name = "?";
3302 const char *in_codec_name = "?";
3303 const char *encoder_name = "?";
3304 const char *out_codec_name = "?";
3305 const AVCodecDescriptor *desc;
3308 decoder_name = in_codec->name;
3309 desc = avcodec_descriptor_get(in_codec->id);
3311 in_codec_name = desc->name;
3312 if (!strcmp(decoder_name, in_codec_name))
3313 decoder_name = "native";
3317 encoder_name = out_codec->name;
3318 desc = avcodec_descriptor_get(out_codec->id);
3320 out_codec_name = desc->name;
3321 if (!strcmp(encoder_name, out_codec_name))
3322 encoder_name = "native";
3325 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3326 in_codec_name, decoder_name,
3327 out_codec_name, encoder_name);
3329 av_log(NULL, AV_LOG_INFO, "\n");
/* (error label:) report the accumulated message and return ret. */
3333 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3337 if (sdp_filename || want_sdp) {
3341 transcode_init_done = 1;
3346 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3347 static int need_output(void)
/* NOTE(review): the `continue`, the `return 1`/`return 0` and braces are
 * elided in this excerpt. */
3351 for (i = 0; i < nb_output_streams; i++) {
3352 OutputStream *ost = output_streams[i];
3353 OutputFile *of = output_files[ost->file_index];
3354 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream is done when it finished or its file hit -fs limit_filesize. */
3356 if (ost->finished ||
3357 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Hitting -frames closes every stream of the same output file. */
3359 if (ost->frame_number >= ost->max_frames) {
3361 for (j = 0; j < of->ctx->nb_streams; j++)
3362 close_output_stream(output_streams[of->ost_index + j]);
3373 * Select the output stream to process.
3375 * @return selected output stream, or NULL if none available
3377 static OutputStream *choose_output(void)
/* Track the smallest output DTS seen so far; the stream lagging furthest
 * behind is the one to feed next (keeps outputs roughly interleaved). */
3380 int64_t opts_min = INT64_MAX;
3381 OutputStream *ost_min = NULL;
3383 for (i = 0; i < nb_output_streams; i++) {
3384 OutputStream *ost = output_streams[i];
/* A stream with no DTS yet sorts first (INT64_MIN) so it gets primed;
 * otherwise rescale cur_dts to a common time base for comparison. */
3385 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3386 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3388 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3389 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Pick the unfinished stream with the minimum DTS; an "unavailable"
 * stream (its input returned EAGAIN) yields NULL so the caller waits. */
3391 if (!ost->finished && opts < opts_min) {
3393 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios.
 * Used around interactive command entry so typed characters are visible.
 * Silently does nothing if stdin is not a tty (tcgetattr fails). */
3399 static void set_tty_echo(int on)
3403 if (tcgetattr(0, &tty) == 0) {
3404 if (on) tty.c_lflag |= ECHO;
3405 else tty.c_lflag &= ~ECHO;
3406 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard for interactive single-key commands (q, +, -, s, c/C,
 * d/D, h, ?). Returns AVERROR_EXIT to request shutdown, 0 otherwise.
 * NOTE(review): this listing omits some lines of the original body;
 * comments below describe only the visible code. */
3411 static int check_keyboard_interaction(int64_t cur_time)
3414 static int64_t last_time;
/* A received signal (SIGINT/SIGTERM counter) aborts immediately. */
3415 if (received_nb_signals)
3416 return AVERROR_EXIT;
3417 /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100 ms, and never poll when daemonized. */
3418 if(cur_time - last_time >= 100000 && !run_as_daemon){
3420 last_time = cur_time;
3424 return AVERROR_EXIT;
/* '+'/'-' raise/lower the log level in steps of 10; 's' toggles the
 * QP histogram display. */
3425 if (key == '+') av_log_set_level(av_log_get_level()+10);
3426 if (key == '-') av_log_set_level(av_log_get_level()-10);
3427 if (key == 's') qp_hist ^= 1;
3430 do_hex_dump = do_pkt_dump = 0;
3431 } else if(do_pkt_dump){
3435 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<argument>]" line from the
 * terminal and dispatch it to the filtergraphs. */
3437 if (key == 'c' || key == 'C'){
3438 char buf[4096], target[64], command[256], arg[256] = {0};
3441 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Accumulate keystrokes until newline/CR, bounded by the buffer size. */
3444 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3449 fprintf(stderr, "\n");
/* At least target, time and command must parse; arg is optional. */
3451 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3452 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3453 target, time, command, arg);
3454 for (i = 0; i < nb_filtergraphs; i++) {
3455 FilterGraph *fg = filtergraphs[i];
/* Lowercase 'c' sends to the first matching filter only
 * (AVFILTER_CMD_FLAG_ONE); 'C' sends to all matching filters. */
3458 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3459 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3460 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3461 } else if (key == 'c') {
3462 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3463 ret = AVERROR_PATCHWELCOME;
/* 'C' path with a future time: queue the command for execution at
 * the given timestamp instead of applying it now. */
3465 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3467 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3472 av_log(NULL, AV_LOG_ERROR,
3473 "Parse error, at least 3 arguments were expected, "
3474 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle debug flags by shifting; 'D': prompt for an explicit value. */
3477 if (key == 'd' || key == 'D'){
3480 debug = input_streams[0]->st->codec->debug<<1;
3481 if(!debug) debug = 1;
3482 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3489 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3494 fprintf(stderr, "\n");
3495 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3496 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to every decoder and encoder context. */
3498 for(i=0;i<nb_input_streams;i++) {
3499 input_streams[i]->st->codec->debug = debug;
3501 for(i=0;i<nb_output_streams;i++) {
3502 OutputStream *ost = output_streams[i];
3503 ost->enc_ctx->debug = debug;
3505 if(debug) av_log_set_level(AV_LOG_DEBUG);
3506 fprintf(stderr,"debug=%d\n", debug);
/* Fallthrough help text printed for '?' / unknown keys. */
3509 fprintf(stderr, "key function\n"
3510 "? show this help\n"
3511 "+ increase verbosity\n"
3512 "- decrease verbosity\n"
3513 "c Send command to first matching filter supporting it\n"
3514 "C Send/Que command to all matching filters\n"
3515 "D cycle through available debug modes\n"
3516 "h dump packets/hex press to cycle through the 3 states\n"
3518 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * 'arg' is the InputFile being demuxed. */
3525 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable (live) inputs so the
 * demuxer thread never stalls behind the consumer. */
3528 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3533 ret = av_read_frame(f->ctx, &pkt);
3535 if (ret == AVERROR(EAGAIN)) {
/* Read error / EOF: propagate the status to the receiving side and stop. */
3540 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3543 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: retry (presumably blocking — the
 * retry's flags handling is on lines omitted from this listing) and
 * advise the user to enlarge the queue. */
3544 if (flags && ret == AVERROR(EAGAIN)) {
3546 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3547 av_log(f->ctx, AV_LOG_WARNING,
3548 "Thread message queue blocking; consider raising the "
3549 "thread_queue_size option (current value: %d)\n",
3550 f->thread_queue_size);
/* Send failed: drop the packet and tell the receiver why we stopped.
 * EOF is expected and not logged as an error. */
3553 if (ret != AVERROR_EOF)
3554 av_log(f->ctx, AV_LOG_ERROR,
3555 "Unable to send packet to main thread: %s\n",
3557 av_packet_unref(&pkt);
3558 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reap all demuxer threads: signal EOF to each queue,
 * drain any queued packets (unref'ing them), join the thread, and free
 * the queue itself. Safe to call when threads were never started. */
3566 static void free_input_threads(void)
3570 for (i = 0; i < nb_input_files; i++) {
3571 InputFile *f = input_files[i];
/* Skip files that never got a queue (e.g. single-input runs). */
3574 if (!f || !f->in_thread_queue)
/* Unblock the sender, then drain so the thread can exit its send loop. */
3576 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3577 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3578 av_packet_unref(&pkt);
3580 pthread_join(f->thread, NULL);
3582 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than
 * one input). Returns 0 on success or a negative AVERROR code. */
3586 static int init_input_threads(void)
/* Single input: the main loop reads directly, no threads needed. */
3590 if (nb_input_files == 1)
3593 for (i = 0; i < nb_input_files; i++) {
3594 InputFile *f = input_files[i];
/* Non-seekable inputs (live sources) — except the lavfi pseudo-demuxer —
 * use non-blocking queue sends so a stalled consumer can't back them up. */
3596 if (f->ctx->pb ? !f->ctx->pb->seekable :
3597 strcmp(f->ctx->iformat->name, "lavfi"))
3598 f->non_blocking = 1;
3599 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3600 f->thread_queue_size, sizeof(AVPacket));
/* On thread-creation failure, free the queue we just allocated and
 * convert the pthread errno-style return into an AVERROR. */
3604 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3605 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3606 av_thread_message_queue_free(&f->in_thread_queue);
3607 return AVERROR(ret);
/* Multi-threaded packet fetch: receive one packet from the input file's
 * demuxer-thread queue. Non-blocking for non-seekable (live) inputs so the
 * caller can service other files; returns AVERROR(EAGAIN) when empty. */
3613 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3615 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3617 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re style rate
 * limiting, and dispatching to the threaded path for multi-input runs. */
3621 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Rate limit: if any stream of this file is ahead of wall-clock time
 * (both converted to microseconds), report EAGAIN instead of reading. */
3625 for (i = 0; i < f->nb_streams; i++) {
3626 InputStream *ist = input_streams[f->ist_index + i];
3627 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3628 int64_t now = av_gettime_relative() - ist->start;
3630 return AVERROR(EAGAIN);
/* Multiple inputs are demuxed by threads; a single input reads inline. */
3635 if (nb_input_files > 1)
3636 return get_input_packet_mt(f, pkt);
3638 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. its input most recently returned EAGAIN. */
3641 static int got_eagain(void)
3644 for (i = 0; i < nb_output_streams; i++)
3645 if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN markers: every input file's eagain flag and
 * every output stream's unavailable flag, so the next transcode iteration
 * retries all of them. */
3650 static void reset_eagain(void)
3653 for (i = 0; i < nb_input_files; i++)
3654 input_files[i]->eagain = 0;
3655 for (i = 0; i < nb_output_streams; i++)
3656 output_streams[i]->unavailable = 0;
3659 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* @param tmp            candidate duration, expressed in tmp_time_base
 * @param duration       in/out: current maximum duration, in time_base
 * @param tmp_time_base  time base of tmp
 * @param time_base      time base of *duration on entry
 * @return the time base in which *duration is expressed on return */
3660 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3661 AVRational time_base)
3667 return tmp_time_base;
/* Compare the two durations across differing time bases without rescaling
 * either value (av_compare_ts avoids precision loss). */
3670 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3673 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek, flush every
 * active decoder, and recompute the file's total duration (needed so the
 * next pass can offset timestamps by one full loop length).
 * Returns 0 on success or a negative AVERROR from the seek. */
3679 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3682 AVCodecContext *avctx;
3683 int i, ret, has_audio = 0;
3684 int64_t duration = 0;
3686 ret = av_seek_frame(is, -1, is->start_time, 0);
/* Flush each decoding stream: drain pending frames (NULL packet with
 * eof=1) and reset decoder internal state before re-reading from start. */
3690 for (i = 0; i < ifile->nb_streams; i++) {
3691 ist = input_streams[ifile->ist_index + i];
3692 avctx = ist->dec_ctx;
3695 if (ist->decoding_needed) {
3696 process_input_packet(ist, NULL, 1);
3697 avcodec_flush_buffers(avctx);
3700 /* duration is the length of the last frame in a stream
3701 * when audio stream is present we don't care about
3702 * last video frame length because it's not defined exactly */
3703 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate the last-frame duration per stream, preferring
 * audio (exact from sample count) over video (frame-rate based guess). */
3707 for (i = 0; i < ifile->nb_streams; i++) {
3708 ist = input_streams[ifile->ist_index + i];
3709 avctx = ist->dec_ctx;
3712 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3713 AVRational sample_rate = {1, avctx->sample_rate};
3715 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video last-frame length: configured -r, else the stream's average
 * frame rate, else fall back to a single time-base tick. */
3719 if (ist->framerate.num) {
3720 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3721 } else if (ist->st->avg_frame_rate.num) {
3722 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3723 } else duration = 1;
3725 if (!ifile->duration)
3726 ifile->time_base = ist->st->time_base;
3727 /* the total duration of the stream, max_pts - min_pts is
3728 * the duration of the stream without the last frame */
3729 duration += ist->max_pts - ist->min_pts;
/* Keep the longest per-stream duration as the file's loop duration. */
3730 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement is handled here (line omitted in this
 * listing after the check). */
3734 if (ifile->loop > 0)
/* Read and process one packet from the given input file.
 * NOTE(review): this listing omits some lines of the original body;
 * comments describe only the visible code.
 * Return values: */
3742 * - 0 -- one packet was read and processed
3743 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3744 * this function should be called again
3745 * - AVERROR_EOF -- this function should not be called again
3747 static int process_input(int file_index)
3749 InputFile *ifile = input_files[file_index];
3750 AVFormatContext *is;
3758 ret = get_input_packet(ifile, &pkt);
3760 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure/EOF, seek back to the start and retry. */
3764 if (ret < 0 && ifile->loop) {
3765 if ((ret = seek_to_start(ifile, is)) < 0)
3767 ret = get_input_packet(ifile, &pkt);
3770 if (ret != AVERROR_EOF) {
3771 print_error(is->filename, ret);
/* EOF handling: flush every decoder of this file and finish the outputs
 * that are fed directly (stream copy / subtitles) rather than via lavfi. */
3776 for (i = 0; i < ifile->nb_streams; i++) {
3777 ist = input_streams[ifile->ist_index + i];
3778 if (ist->decoding_needed) {
3779 ret = process_input_packet(ist, NULL, 0);
3784 /* mark all outputs that don't go through lavfi as finished */
3785 for (j = 0; j < nb_output_streams; j++) {
3786 OutputStream *ost = output_streams[j];
3788 if (ost->source_index == ifile->ist_index + i &&
3789 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3790 finish_output_stream(ost);
/* EAGAIN (not a hard error) so the main loop tries other files once
 * before concluding everything is done. */
3794 ifile->eof_reached = 1;
3795 return AVERROR(EAGAIN);
3801 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3802 is->streams[pkt.stream_index]);
3804 /* the following test is needed in case new streams appear
3805 dynamically in stream : we ignore them */
3806 if (pkt.stream_index >= ifile->nb_streams) {
3807 report_new_stream(file_index, &pkt);
3808 goto discard_packet;
3811 ist = input_streams[ifile->ist_index + pkt.stream_index];
3813 ist->data_size += pkt.size;
3817 goto discard_packet;
3819 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3820 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose per-packet trace of incoming demuxer timestamps. */
3825 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3826 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3827 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3828 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3829 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3830 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3831 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3832 av_ts2str(input_files[ist->file_index]->ts_offset),
3833 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for containers with narrow pts_wrap_bits
 * (e.g. MPEG-TS 33-bit timestamps). */
3836 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3837 int64_t stime, stime2;
3838 // Correcting starttime based on the enabled streams
3839 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3840 // so we instead do it here as part of discontinuity handling
3841 if ( ist->next_dts == AV_NOPTS_VALUE
3842 && ifile->ts_offset == -is->start_time
3843 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3844 int64_t new_start_time = INT64_MAX;
3845 for (i=0; i<is->nb_streams; i++) {
3846 AVStream *st = is->streams[i];
3847 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3849 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3851 if (new_start_time > is->start_time) {
3852 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3853 ifile->ts_offset = -new_start_time;
3857 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3858 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3859 ist->wrap_correction_done = 1;
/* If a timestamp already wrapped past the half-range point, unwrap it
 * and keep correction pending for the other timestamp. */
3861 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3862 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3863 ist->wrap_correction_done = 0;
3865 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3866 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3867 ist->wrap_correction_done = 0;
3871 /* add the stream-global side data to the first packet */
3872 if (ist->nb_packets == 1) {
3873 if (ist->st->nb_side_data)
3874 av_packet_split_side_data(&pkt);
3875 for (i = 0; i < ist->st->nb_side_data; i++) {
3876 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries, and skip the
 * display matrix when autorotation will consume it instead. */
3879 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3881 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3884 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3888 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's timestamp offset (-itsoffset / start-time
 * normalization), then the per-stream -itsscale factor. */
3892 if (pkt.dts != AV_NOPTS_VALUE)
3893 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3894 if (pkt.pts != AV_NOPTS_VALUE)
3895 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3897 if (pkt.pts != AV_NOPTS_VALUE)
3898 pkt.pts *= ist->ts_scale;
3899 if (pkt.dts != AV_NOPTS_VALUE)
3900 pkt.dts *= ist->ts_scale;
/* First-packet discontinuity check against the file's last known
 * timestamp (inter-stream), only for TS_DISCONT formats. */
3902 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3903 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3904 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3905 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3906 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3907 int64_t delta = pkt_dts - ifile->last_ts;
3908 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3909 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3910 ifile->ts_offset -= delta;
3911 av_log(NULL, AV_LOG_DEBUG,
3912 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3913 delta, ifile->ts_offset);
3914 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3915 if (pkt.pts != AV_NOPTS_VALUE)
3916 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated loop duration and
 * track the pts range for the next loop's duration estimate. */
3920 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3921 if (pkt.pts != AV_NOPTS_VALUE) {
3922 pkt.pts += duration;
3923 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3924 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3927 if (pkt.dts != AV_NOPTS_VALUE)
3928 pkt.dts += duration;
/* Intra-stream discontinuity detection against the predicted next_dts. */
3930 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3931 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3932 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3933 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3935 int64_t delta = pkt_dts - ist->next_dts;
3936 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* TS_DISCONT formats: absorb the jump into ts_offset. */
3937 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3938 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3939 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3940 ifile->ts_offset -= delta;
3941 av_log(NULL, AV_LOG_DEBUG,
3942 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3943 delta, ifile->ts_offset);
3944 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3945 if (pkt.pts != AV_NOPTS_VALUE)
3946 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-TS_DISCONT formats: timestamps beyond the error threshold are
 * considered bogus and dropped rather than corrected. */
3949 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3950 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3951 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3952 pkt.dts = AV_NOPTS_VALUE;
3954 if (pkt.pts != AV_NOPTS_VALUE){
3955 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3956 delta = pkt_pts - ist->next_dts;
3957 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3958 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3959 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3960 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last DTS of this file for the inter-stream check above. */
3966 if (pkt.dts != AV_NOPTS_VALUE)
3967 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3970 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3971 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3972 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3973 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3974 av_ts2str(input_files[ist->file_index]->ts_offset),
3975 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video overlays alive even when no new subtitle arrives. */
3978 sub2video_heartbeat(ist, pkt.pts);
3980 process_input_packet(ist, &pkt, 0);
3983 av_packet_unref(&pkt);
3989 * Perform a step of transcoding for the specified filter graph.
3991 * @param[in] graph filter graph to consider
3992 * @param[out] best_ist input stream where a frame would allow to continue
3993 * @return 0 for success, <0 for error
3995 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3998 int nb_requests, nb_requests_max = 0;
3999 InputFilter *ifilter;
/* Ask the graph to produce output from its oldest sink; success means a
 * frame is ready, so reap it. */
4003 ret = avfilter_graph_request_oldest(graph->graph);
4005 return reap_filters(0);
/* Graph drained: flush remaining frames and close all fed outputs. */
4007 if (ret == AVERROR_EOF) {
4008 ret = reap_filters(1);
4009 for (i = 0; i < graph->nb_outputs; i++)
4010 close_output_stream(graph->outputs[i]->ost);
4013 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input stream whose buffersrc
 * rejected the most requests — feeding it is most likely to unblock. */
4016 for (i = 0; i < graph->nb_inputs; i++) {
4017 ifilter = graph->inputs[i];
/* Skip inputs whose file is stalled (EAGAIN) or exhausted. */
4019 if (input_files[ist->file_index]->eagain ||
4020 input_files[ist->file_index]->eof_reached)
4022 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4023 if (nb_requests > nb_requests_max) {
4024 nb_requests_max = nb_requests;
/* No feedable input found: mark the graph's outputs unavailable so
 * choose_output() skips them this round. */
4030 for (i = 0; i < graph->nb_outputs; i++)
4031 graph->outputs[i]->ost->unavailable = 1;
4037 * Run a single step of transcoding.
4039 * @return 0 for success, <0 for error
4041 static int transcode_step(void)
/* Pick the output stream lagging furthest behind; NULL means all are
 * either finished or temporarily unavailable. */
4047 ost = choose_output();
4054 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed output: let the graph decide which input stream to feed. */
4059 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Directly-fed output (stream copy / subtitle): its source stream is
 * fixed by source_index. */
4064 av_assert0(ost->source_index >= 0);
4065 ist = input_streams[ost->source_index];
4068 ret = process_input(ist->file_index);
/* EAGAIN from the chosen file: mark only this output unavailable so the
 * main loop tries other streams before sleeping. */
4069 if (ret == AVERROR(EAGAIN)) {
4070 if (input_files[ist->file_index]->eagain)
4071 ost->unavailable = 1;
4076 return ret == AVERROR_EOF ? 0 : ret;
/* Drain any frames the step made available on the filtergraph sinks. */
4078 return reap_filters(0);
4082 * The following code is the main loop of the file converter
/* Top-level driver: initialize, loop over transcode_step() until no output
 * is needed or a fatal error/signal occurs, then flush decoders, write
 * trailers, print the final report, and release per-stream resources.
 * NOTE(review): this listing omits some lines of the original body;
 * comments describe only the visible code. */
4084 static int transcode(void)
4087 AVFormatContext *os;
4090 int64_t timer_start;
4091 int64_t total_packets_written = 0;
4093 ret = transcode_init();
4097 if (stdin_interaction) {
4098 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4101 timer_start = av_gettime_relative();
/* Threaded demuxing (only active with multiple input files). */
4104 if ((ret = init_input_threads()) < 0)
4108 while (!received_sigterm) {
4109 int64_t cur_time= av_gettime_relative();
4111 /* if 'q' pressed, exits */
4112 if (stdin_interaction)
4113 if (check_keyboard_interaction(cur_time) < 0)
4116 /* check if there's any stream where output is still needed */
4117 if (!need_output()) {
4118 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4122 ret = transcode_step();
/* EOF/EAGAIN from a step are not fatal; other errors are reported
 * and (per the omitted lines) end the loop. */
4124 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4128 av_strerror(ret, errbuf, sizeof(errbuf));
4130 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4135 /* dump report by using the output first video and audio streams */
4136 print_report(0, timer_start, cur_time);
4139 free_input_threads();
4142 /* at the end of stream, we must flush the decoder buffers */
4143 for (i = 0; i < nb_input_streams; i++) {
4144 ist = input_streams[i];
4145 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4146 process_input_packet(ist, NULL, 0);
4153 /* write the trailer if needed and close file */
4154 for (i = 0; i < nb_output_files; i++) {
4155 os = output_files[i]->ctx;
4156 if ((ret = av_write_trailer(os)) < 0) {
4157 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4163 /* dump report by using the first video and audio streams */
4164 print_report(1, timer_start, av_gettime_relative());
4166 /* close each encoder */
4167 for (i = 0; i < nb_output_streams; i++) {
4168 ost = output_streams[i];
4169 if (ost->encoding_needed) {
4170 av_freep(&ost->enc_ctx->stats_in);
4172 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4175 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4176 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4180 /* close each decoder */
4181 for (i = 0; i < nb_input_streams; i++) {
4182 ist = input_streams[i];
4183 if (ist->decoding_needed) {
4184 avcodec_close(ist->dec_ctx);
4185 if (ist->hwaccel_uninit)
4186 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (also reached on error): threads, then per-output-stream
 * allocations — pass logfiles, forced keyframe lists, option dicts. */
4195 free_input_threads();
4198 if (output_streams) {
4199 for (i = 0; i < nb_output_streams; i++) {
4200 ost = output_streams[i];
4203 fclose(ost->logfile);
4204 ost->logfile = NULL;
4206 av_freep(&ost->forced_kf_pts);
4207 av_freep(&ost->apad);
4208 av_freep(&ost->disposition);
4209 av_dict_free(&ost->encoder_opts);
4210 av_dict_free(&ost->sws_dict);
4211 av_dict_free(&ost->swr_opts);
4212 av_dict_free(&ost->resample_opts);
/* Return this process's consumed user CPU time in microseconds, using the
 * best available platform API: getrusage (POSIX), GetProcessTimes (Windows,
 * 100 ns units divided by 10), or wall clock as a last resort. */
4220 static int64_t getutime(void)
4223 struct rusage rusage;
4225 getrusage(RUSAGE_SELF, &rusage);
4226 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4227 #elif HAVE_GETPROCESSTIMES
4229 FILETIME c, e, k, u;
4230 proc = GetCurrentProcess();
4231 GetProcessTimes(proc, &c, &e, &k, &u);
4232 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* Fallback: wall-clock time, not true CPU time. */
4234 return av_gettime_relative();
/* Return the peak memory usage of this process in bytes: max RSS via
 * getrusage (ru_maxrss is in kilobytes, hence *1024) on POSIX, or the peak
 * pagefile usage via GetProcessMemoryInfo on Windows. */
4238 static int64_t getmaxrss(void)
4240 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4241 struct rusage rusage;
4242 getrusage(RUSAGE_SELF, &rusage);
4243 return (int64_t)rusage.ru_maxrss * 1024;
4244 #elif HAVE_GETPROCESSMEMORYINFO
4246 PROCESS_MEMORY_COUNTERS memcounters;
4247 proc = GetCurrentProcess();
4248 memcounters.cb = sizeof(memcounters);
4249 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4250 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in "-d" (daemon) mode to silence all
 * libav* logging. */
4256 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4260 int main(int argc, char **argv)
4265 register_exit(ffmpeg_cleanup);
4267 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4269 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4270 parse_loglevel(argc, argv, options);
4272 if(argc>1 && !strcmp(argv[1], "-d")){
4274 av_log_set_callback(log_callback_null);
4279 avcodec_register_all();
4281 avdevice_register_all();
4283 avfilter_register_all();
4285 avformat_network_init();
4287 show_banner(argc, argv, options);
4291 /* parse options and open all input/output files */
4292 ret = ffmpeg_parse_options(argc, argv);
4296 if (nb_output_files <= 0 && nb_input_files == 0) {
4298 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4302 /* file converter / grab */
4303 if (nb_output_files <= 0) {
4304 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4308 // if (nb_input_files == 0) {
4309 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4313 current_time = ti = getutime();
4314 if (transcode() < 0)
4316 ti = getutime() - ti;
4318 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4320 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4321 decode_error_stat[0], decode_error_stat[1]);
4322 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4325 exit_program(received_nb_signals ? 255 : main_return_code);
4326 return main_return_code;