2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for the banner and -version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file handle for -vstats output; opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Constant names available in -force_key_frames expressions (array truncated in this listing). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
/* Nonzero when running detached from a terminal (disables tty tweaking). */
127 static int run_as_daemon = 0;
/* Frame duplication/drop counters reported in the status line. */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int current_time;
/* Destination for -progress reports (may be NULL). */
133 AVIOContext *progress_avio = NULL;
134 
/* Scratch buffer shared by all subtitle encodes (see do_subtitle_out). */
135 static uint8_t *subtitle_out;
136 
/* Global registries of input/output streams and files, owned by this file. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
141 
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
146 
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal mode, restored by term_exit_sigsafe() when restore_tty is set. */
153 static struct termios oldtty;
154 static int restore_tty;
155 
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal state shared between async signal handlers and the main loop;
   volatile because it is written from handler context. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
/* Nonzero once transcode initialization has finished; compared against the
   signal count in decode_interrupt_cb(). */
316 static volatile int transcode_init_done = 0;
/* Set when ffmpeg is about to exit; the Windows ctrl handler spins on it. */
317 static volatile int ffmpeg_exited = 0;
/* Process exit code accumulated across muxing errors. */
318 static int main_return_code = 0;
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: translate console events into the same
 * signal path as POSIX. For close/logoff/shutdown the process is hard
 * terminated as soon as this handler returns, so stall until the main
 * thread has finished cleaning up.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Returns the next pending byte from stdin, or a negative value when no
   input is available. Platform-specific; this excerpt is incomplete. */
401 static int read_key(void)
/* POSIX path: poll fd 0 with select() using a zero timeout so we never block. */
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
/* Windows path: a console handle supports GetConsoleMode(); anything else
   (e.g. a redirected pipe) must be peeked instead of read. */
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run on exit: frees filtergraphs, output files/streams,
   input files/streams and all per-stream buffers, then reports how the run
   ended. `ret` is the pending process exit status. (Excerpt; some lines of
   the original body are missing from this listing.) */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free filtergraphs: each graph, its named inputs/outputs, and the description */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* close output files: close the AVIO context only for real-file muxers */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* free per-output-stream state: bitstream filter chain, cached frames,
   parser, forced-keyframe data and the encoder context */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 avcodec_free_context(&ost->enc_ctx);
532 av_freep(&output_streams[i]);
/* stop input reader threads before tearing the input side down */
535 free_input_threads();
537 for (i = 0; i < nb_input_files; i++) {
538 avformat_close_input(&input_files[i]->ctx);
539 av_freep(&input_files[i]);
541 for (i = 0; i < nb_input_streams; i++) {
542 InputStream *ist = input_streams[i];
544 av_frame_free(&ist->decoded_frame);
545 av_frame_free(&ist->filter_frame);
546 av_dict_free(&ist->decoder_opts);
547 avsubtitle_free(&ist->prev_sub.subtitle);
548 av_frame_free(&ist->sub2video.frame);
549 av_freep(&ist->filters);
550 av_freep(&ist->hwaccel_device);
552 avcodec_free_context(&ist->dec_ctx);
554 av_freep(&input_streams[i]);
559 av_freep(&vstats_filename);
/* finally release the global stream/file registries themselves */
561 av_freep(&input_streams);
562 av_freep(&input_files);
563 av_freep(&output_streams);
564 av_freep(&output_files);
568 avformat_network_deinit();
570 if (received_sigterm) {
571 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
572 (int) received_sigterm);
573 } else if (ret && transcode_init_done) {
574 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
589 void assert_avoptions(AVDictionary *m)
591 AVDictionaryEntry *t;
592 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
593 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
603 static void update_benchmark(const char *fmt, ...)
605 if (do_benchmark_all) {
606 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
614 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded/copied packet to the muxer: copies extradata on first
   use, applies -vsync/-async timestamp dropping, enforces -frames limits,
   extracts video quality side data, runs the bitstream filter chain,
   sanitizes DTS monotonicity and finally interleaves the packet. On a
   muxer error all streams are shut down. (Excerpt; some lines of the
   original body are missing from this listing.) */
629 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
631 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
632 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* lazily propagate encoder extradata to the muxer stream context */
635 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
636 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
637 if (ost->st->codec->extradata) {
638 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
639 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* VSYNC_DROP / negative async: discard timestamps and let the muxer regenerate */
643 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648 * Audio encoders may split the packets -- #frames in != #packets out.
649 * But there is no reordering, so we can limit the number of output packets
650 * by simply dropping them here.
651 * Counting encoded video frames needs to be done separately because of
652 * reordering, see do_video_out()
654 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
655 if (ost->frame_number >= ost->max_frames) {
656 av_packet_unref(pkt);
/* pull per-frame quality/PSNR stats out of the packet side data */
661 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
663 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665 ost->quality = sd ? AV_RL32(sd) : -1;
666 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670 ost->error[i] = AV_RL64(sd + 8 + 8*i);
675 if (ost->frame_rate.num && ost->is_cfr) {
676 if (pkt->duration > 0)
677 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
678 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
684 av_packet_split_side_data(pkt);
/* run each bitstream filter in the chain; replace the payload when a
   filter produced a new buffer */
687 AVPacket new_pkt = *pkt;
688 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
691 int a = av_bitstream_filter_filter(bsfc, avctx,
692 bsf_arg ? bsf_arg->value : NULL,
693 &new_pkt.data, &new_pkt.size,
694 pkt->data, pkt->size,
695 pkt->flags & AV_PKT_FLAG_KEY);
696 if(a == 0 && new_pkt.data != pkt->data) {
697 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
699 memcpy(t, new_pkt.data, new_pkt.size);
700 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
708 pkt->side_data = NULL;
709 pkt->side_data_elems = 0;
710 av_packet_unref(pkt);
711 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
712 av_buffer_default_free, NULL, 0);
717 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
718 bsfc->filter->name, pkt->stream_index,
719 avctx->codec ? avctx->codec->name : "copy");
/* timestamp sanitation: repair dts>pts, then enforce (strictly) increasing
   dts versus the previously muxed packet */
729 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
730 if (pkt->dts != AV_NOPTS_VALUE &&
731 pkt->pts != AV_NOPTS_VALUE &&
732 pkt->dts > pkt->pts) {
733 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
735 ost->file_index, ost->st->index);
/* pick the median of pts, dts and last_mux_dts+1 as the new dts */
737 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
738 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
739 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
742 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
743 pkt->dts != AV_NOPTS_VALUE &&
744 ost->last_mux_dts != AV_NOPTS_VALUE) {
745 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
746 if (pkt->dts < max) {
747 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
748 av_log(s, loglevel, "Non-monotonous DTS in output stream "
749 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
750 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
752 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
755 av_log(s, loglevel, "changing to %"PRId64". This may result "
756 "in incorrect timestamps in the output file.\n",
758 if(pkt->pts >= pkt->dts)
759 pkt->pts = FFMAX(pkt->pts, max);
764 ost->last_mux_dts = pkt->dts;
766 ost->data_size += pkt->size;
767 ost->packets_written++;
769 pkt->stream_index = ost->index;
772 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
773 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
774 av_get_media_type_string(ost->enc_ctx->codec_type),
775 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
776 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
781 ret = av_interleaved_write_frame(s, pkt);
/* a muxer error is fatal for the whole output: finish every stream */
783 print_error("av_interleaved_write_frame()", ret);
784 main_return_code = 1;
785 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
787 av_packet_unref(pkt);
790 static void close_output_stream(OutputStream *ost)
792 OutputFile *of = output_files[ost->file_index];
794 ost->finished |= ENCODER_FINISHED;
796 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
797 of->recording_time = FFMIN(of->recording_time, end);
801 static int check_recording_time(OutputStream *ost)
803 OutputFile *of = output_files[ost->file_index];
805 if (of->recording_time != INT64_MAX &&
806 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
807 AV_TIME_BASE_Q) >= 0) {
808 close_output_stream(ost);
814 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
817 AVCodecContext *enc = ost->enc_ctx;
821 av_init_packet(&pkt);
825 if (!check_recording_time(ost))
828 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
829 frame->pts = ost->sync_opts;
830 ost->sync_opts = frame->pts + frame->nb_samples;
831 ost->samples_encoded += frame->nb_samples;
832 ost->frames_encoded++;
834 av_assert0(pkt.size || !pkt.data);
835 update_benchmark(NULL);
837 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
838 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
839 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
840 enc->time_base.num, enc->time_base.den);
843 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
844 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
847 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
850 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
853 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
854 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
855 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
856 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
859 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded twice
   (one packet draws, a second clears), other codecs once. Timestamps are
   shifted to honor -ss and start_display_time is normalized to 0 before
   encoding. (Excerpt; some lines of the original body are missing from this
   listing.) */
863 static void do_subtitle_out(AVFormatContext *s,
/* fixed-size shared scratch buffer for the encoded payload */
868 int subtitle_out_max_size = 1024 * 1024;
869 int subtitle_out_size, nb, i;
874 if (sub->pts == AV_NOPTS_VALUE) {
875 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
884 subtitle_out = av_malloc(subtitle_out_max_size);
886 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
891 /* Note: DVB subtitle need one packet to draw them and one other
892 packet to clear them */
893 /* XXX: signal it in the codec context ? */
894 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
899 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
901 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
902 pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), 1 otherwise */
903 for (i = 0; i < nb; i++) {
904 unsigned save_num_rects = sub->num_rects;
906 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
907 if (!check_recording_time(ost))
911 // start_display_time is required to be 0
912 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
913 sub->end_display_time -= sub->start_display_time;
914 sub->start_display_time = 0;
918 ost->frames_encoded++;
920 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
921 subtitle_out_max_size, sub);
/* the encoder may clobber num_rects on the clearing pass; restore it */
923 sub->num_rects = save_num_rects;
924 if (subtitle_out_size < 0) {
925 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
929 av_init_packet(&pkt);
930 pkt.data = subtitle_out;
931 pkt.size = subtitle_out_size;
932 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
933 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
934 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
935 /* XXX: the pts correction is handled here. Maybe handling
936 it in the codec would be better */
/* DVB timestamps use a 90 kHz clock; display times are in ms */
938 pkt.pts += 90 * sub->start_display_time;
940 pkt.pts += 90 * sub->end_display_time;
943 write_frame(s, &pkt, ost);
/* Core video output path: decides, per -vsync mode, how many times the next
   (or previous) filtered frame must be emitted to keep the output clock in
   step (duplicating or dropping frames), then encodes each copy, handles
   forced keyframes, and muxes the packets. next_picture == NULL flushes.
   (Excerpt; some lines of the original body are missing from this
   listing.) */
947 static void do_video_out(AVFormatContext *s,
949 AVFrame *next_picture,
952 int ret, format_video_sync;
954 AVCodecContext *enc = ost->enc_ctx;
955 AVCodecContext *mux_enc = ost->st->codec;
956 int nb_frames, nb0_frames, i;
957 double delta, delta0;
960 InputStream *ist = NULL;
961 AVFilterContext *filter = ost->filter->filter;
963 if (ost->source_index >= 0)
964 ist = input_streams[ost->source_index];
/* derive the nominal frame duration (in encoder ticks) from the filter's
   frame rate, clamped by the output frame rate and the input packet duration */
966 if (filter->inputs[0]->frame_rate.num > 0 &&
967 filter->inputs[0]->frame_rate.den > 0)
968 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
970 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
971 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
973 if (!ost->filters_script &&
977 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
978 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path: predict the repeat count from the recent history */
983 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
984 ost->last_nb0_frames[1],
985 ost->last_nb0_frames[2]);
987 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
988 delta = delta0 + duration;
990 /* by default, we output a single frame */
991 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve VSYNC_AUTO into a concrete sync mode based on the muxer */
994 format_video_sync = video_sync_method;
995 if (format_video_sync == VSYNC_AUTO) {
996 if(!strcmp(s->oformat->name, "avi")) {
997 format_video_sync = VSYNC_VFR;
999 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1001 && format_video_sync == VSYNC_CFR
1002 && input_files[ist->file_index]->ctx->nb_streams == 1
1003 && input_files[ist->file_index]->input_ts_offset == 0) {
1004 format_video_sync = VSYNC_VSCFR;
1006 if (format_video_sync == VSYNC_CFR && copy_ts) {
1007 format_video_sync = VSYNC_VSCFR;
1010 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* clamp frames that arrive too far in the past */
1014 format_video_sync != VSYNC_PASSTHROUGH &&
1015 format_video_sync != VSYNC_DROP) {
1016 if (delta0 < -0.6) {
1017 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1019 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1020 sync_ipts = ost->sync_opts;
/* per-mode decision on how many frames to emit (nb_frames) and how many of
   them repeat the previous picture (nb0_frames) */
1025 switch (format_video_sync) {
1027 if (ost->frame_number == 0 && delta0 >= 0.5) {
1028 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1031 ost->sync_opts = lrint(sync_ipts);
1034 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1035 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1037 } else if (delta < -1.1)
1039 else if (delta > 1.1) {
1040 nb_frames = lrintf(delta);
1042 nb0_frames = lrintf(delta0 - 0.6);
1048 else if (delta > 0.6)
1049 ost->sync_opts = lrint(sync_ipts);
1052 case VSYNC_PASSTHROUGH:
1053 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit; record history for the flush predictor */
1060 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1061 nb0_frames = FFMIN(nb0_frames, nb_frames);
1063 memmove(ost->last_nb0_frames + 1,
1064 ost->last_nb0_frames,
1065 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1066 ost->last_nb0_frames[0] = nb0_frames;
1068 if (nb0_frames == 0 && ost->last_dropped) {
1070 av_log(NULL, AV_LOG_VERBOSE,
1071 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1072 ost->frame_number, ost->st->index, ost->last_frame->pts);
1074 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1075 if (nb_frames > dts_error_threshold * 30) {
1076 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1080 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1081 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1083 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1085 /* duplicates frame if needed */
1086 for (i = 0; i < nb_frames; i++) {
1087 AVFrame *in_picture;
1088 av_init_packet(&pkt);
/* the first nb0_frames iterations re-emit the previous picture */
1092 if (i < nb0_frames && ost->last_frame) {
1093 in_picture = ost->last_frame;
1095 in_picture = next_picture;
1100 in_picture->pts = ost->sync_opts;
1103 if (!check_recording_time(ost))
1105 if (ost->frame_number >= ost->max_frames)
1109 #if FF_API_LAVF_FMT_RAWPICTURE
/* legacy raw-picture shortcut: pass the AVFrame pointer straight through */
1110 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1111 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1112 /* raw pictures are written as AVPicture structure to
1113 avoid any copies. We support temporarily the older
1115 if (in_picture->interlaced_frame)
1116 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1118 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1119 pkt.data = (uint8_t *)in_picture;
1120 pkt.size = sizeof(AVPicture);
1121 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1122 pkt.flags |= AV_PKT_FLAG_KEY;
1124 write_frame(s, &pkt, ost);
1128 int got_packet, forced_keyframe = 0;
1131 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1132 ost->top_field_first >= 0)
1133 in_picture->top_field_first = !!ost->top_field_first;
1135 if (in_picture->interlaced_frame) {
1136 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1137 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1139 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1141 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1143 in_picture->quality = enc->global_quality;
1144 in_picture->pict_type = 0;
/* forced keyframes: -force_key_frames pts list, expression, or "source" */
1146 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1147 in_picture->pts * av_q2d(enc->time_base) : NAN;
1148 if (ost->forced_kf_index < ost->forced_kf_count &&
1149 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1150 ost->forced_kf_index++;
1151 forced_keyframe = 1;
1152 } else if (ost->forced_keyframes_pexpr) {
1154 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1155 res = av_expr_eval(ost->forced_keyframes_pexpr,
1156 ost->forced_keyframes_expr_const_values, NULL);
1157 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1158 ost->forced_keyframes_expr_const_values[FKF_N],
1159 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1160 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1161 ost->forced_keyframes_expr_const_values[FKF_T],
1162 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1165 forced_keyframe = 1;
1166 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1167 ost->forced_keyframes_expr_const_values[FKF_N];
1168 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1169 ost->forced_keyframes_expr_const_values[FKF_T];
1170 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1173 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1174 } else if ( ost->forced_keyframes
1175 && !strncmp(ost->forced_keyframes, "source", 6)
1176 && in_picture->key_frame==1) {
1177 forced_keyframe = 1;
1180 if (forced_keyframe) {
1181 in_picture->pict_type = AV_PICTURE_TYPE_I;
1182 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1185 update_benchmark(NULL);
1187 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1188 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1189 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1190 enc->time_base.num, enc->time_base.den);
1193 ost->frames_encoded++;
1195 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1196 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1198 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1204 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1205 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1206 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1207 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1210 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1211 pkt.pts = ost->sync_opts;
1213 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1216 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1217 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1218 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1219 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1222 frame_size = pkt.size;
1223 write_frame(s, &pkt, ost);
1225 /* if two pass, output log */
1226 if (ost->logfile && enc->stats_out) {
1227 fprintf(ost->logfile, "%s", enc->stats_out);
1233 * For video, number of frames in == number of packets out.
1234 * But there may be reordering, so we can't throw away frames on encoder
1235 * flush, we need to limit them here, before they go into encoder.
1237 ost->frame_number++;
1239 if (vstats_filename && frame_size)
1240 do_video_stats(ost, frame_size);
/* cache the last frame so VFR modes can re-emit it later */
1243 if (!ost->last_frame)
1244 ost->last_frame = av_frame_alloc();
1245 av_frame_unref(ost->last_frame);
1246 if (next_picture && ost->last_frame)
1247 av_frame_ref(ost->last_frame, next_picture);
1249 av_frame_free(&ost->last_frame);
1252 static double psnr(double d)
1254 return -10.0 * log10(d);
/* Append one line of per-frame statistics (frame number, quality, PSNR,
   size, timing, bitrates, picture type) to the -vstats log file. The file
   is opened on first use. (Excerpt; some lines of the original body are
   missing from this listing.) */
1257 static void do_video_stats(OutputStream *ost, int frame_size)
1259 AVCodecContext *enc;
1261 double ti1, bitrate, avg_bitrate;
1263 /* this is executed just the first time do_video_stats is called */
1265 vstats_file = fopen(vstats_filename, "w");
1273 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1274 frame_number = ost->st->nb_frames;
1275 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1276 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is derived from the encoder's accumulated squared error */
1278 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1279 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1281 fprintf(vstats_file,"f_size= %6d ", frame_size);
1282 /* compute pts value */
1283 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1287 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1288 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1289 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1290 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1291 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * finish_output_stream(): mark 'ost' as completely done — nothing more
 * will be encoded for it and nothing more muxed. The loop then marks
 * every stream of the same output file as finished as well.
 * NOTE(review): numbering gaps — the condition guarding the loop is not
 * visible in this excerpt; confirm against the full source before
 * assuming it runs unconditionally.
 */
1295 static void finish_output_stream(OutputStream *ost)
1297 OutputFile *of = output_files[ost->file_index];
1300 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1303 for (i = 0; i < of->ctx->nb_streams; i++)
1304 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1309 * Get and encode new output from any of the filtergraphs, without causing activity.
1312 * @return 0 for success, <0 for severe errors
/*
 * reap_filters(): drain every output stream's buffersink without
 * requesting more frames from the graph, and hand each frame to the
 * appropriate encoder path (do_video_out / do_audio_out). With
 * 'flush' set, an EOF from a video sink triggers a final flush call.
 * Returns 0 on success, AVERROR(ENOMEM) if a frame cannot be allocated.
 * NOTE(review): numbering gaps — loop framing, some 'continue'/'break'
 * statements and the final return are not visible in this excerpt.
 */
1314 static int reap_filters(int flush)
1316 AVFrame *filtered_frame = NULL;
1319 /* Reap all buffers present in the buffer sinks */
1320 for (i = 0; i < nb_output_streams; i++) {
1321 OutputStream *ost = output_streams[i];
1322 OutputFile *of = output_files[ost->file_index];
1323 AVFilterContext *filter;
1324 AVCodecContext *enc = ost->enc_ctx;
1329 filter = ost->filter->filter;
/* lazily allocate the reusable frame that receives sink output */
1331 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1332 return AVERROR(ENOMEM);
1334 filtered_frame = ost->filtered_frame;
1337 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued, never pull new input */
1338 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1339 AV_BUFFERSINK_FLAG_NO_REQUEST);
1341 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1342 av_log(NULL, AV_LOG_WARNING,
1343 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1344 } else if (flush && ret == AVERROR_EOF) {
/* on flush, signal video EOF downstream so the encoder can be drained */
1345 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1346 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1350 if (ost->finished) {
1351 av_frame_unref(filtered_frame);
1354 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1355 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1356 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra sub-tick precision */
1357 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1359 tb.den <<= extra_bits;
1361 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1362 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1363 float_pts /= 1 << extra_bits;
1364 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1365 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts: same rescale, but directly into the encoder timebase */
1367 filtered_frame->pts =
1368 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1369 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1371 //if (ost->source_index >= 0)
1372 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1374 switch (filter->inputs[0]->type) {
1375 case AVMEDIA_TYPE_VIDEO:
/* propagate the filter graph's SAR unless the user forced an aspect ratio */
1376 if (!ost->frame_aspect_ratio.num)
1377 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1380 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1381 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1383 enc->time_base.num, enc->time_base.den);
1386 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1388 case AVMEDIA_TYPE_AUDIO:
/* mid-stream channel-count changes need encoder support for param changes */
1389 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1390 enc->channels != av_frame_get_channels(filtered_frame)) {
1391 av_log(NULL, AV_LOG_ERROR,
1392 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1395 do_audio_out(of->ctx, ost, filtered_frame);
1398 // TODO support subtitle filters
1402 av_frame_unref(filtered_frame);
/*
 * print_final_stats(): emit the end-of-run summary — per-type byte
 * totals and muxing overhead at INFO level, detailed per-input/output
 * stream packet and frame counts at VERBOSE level, and a warning when
 * nothing at all was encoded.
 *
 * @param total_size  total bytes written to the output (as reported by
 *                    avio); <= 0 means unknown
 * NOTE(review): numbering gaps — some loop/brace framing and a couple
 * of conditions are not visible in this excerpt.
 */
1411 static void print_final_stats(int64_t total_size)
1412 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1413 uint64_t subtitle_size = 0;
1414 uint64_t data_size = 0;
1415 float percent = -1.0;
/* first pass: accumulate per-media-type encoded byte counts */
1418 for (i = 0; i < nb_output_streams; i++) {
1419 OutputStream *ost = output_streams[i];
1420 switch (ost->enc_ctx->codec_type) {
1421 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1422 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1423 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1424 default: other_size += ost->data_size; break;
1426 extra_size += ost->enc_ctx->extradata_size;
1427 data_size += ost->data_size;
/* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with deprecated CODEC_FLAG_PASS2;
 * the values match upstream usage of this era but verify when updating */
1428 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1429 != AV_CODEC_FLAG_PASS1)
/* muxing overhead: container bytes beyond the raw encoded payload */
1433 if (data_size && total_size>0 && total_size >= data_size)
1434 percent = 100.0 * (total_size - data_size) / data_size;
1436 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1437 video_size / 1024.0,
1438 audio_size / 1024.0,
1439 subtitle_size / 1024.0,
1440 other_size / 1024.0,
1441 extra_size / 1024.0);
1443 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1445 av_log(NULL, AV_LOG_INFO, "unknown");
1446 av_log(NULL, AV_LOG_INFO, "\n");
1448 /* print verbose per-stream stats */
1449 for (i = 0; i < nb_input_files; i++) {
1450 InputFile *f = input_files[i];
1451 uint64_t total_packets = 0, total_size = 0;
1453 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1454 i, f->ctx->filename);
1456 for (j = 0; j < f->nb_streams; j++) {
1457 InputStream *ist = input_streams[f->ist_index + j];
1458 enum AVMediaType type = ist->dec_ctx->codec_type;
1460 total_size += ist->data_size;
1461 total_packets += ist->nb_packets;
1463 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1464 i, j, media_type_string(type));
1465 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1466 ist->nb_packets, ist->data_size);
1468 if (ist->decoding_needed) {
1469 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1470 ist->frames_decoded);
1471 if (type == AVMEDIA_TYPE_AUDIO)
1472 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1473 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476 av_log(NULL, AV_LOG_VERBOSE, "\n");
1479 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1480 total_packets, total_size);
/* symmetric summary for each output file and its streams */
1483 for (i = 0; i < nb_output_files; i++) {
1484 OutputFile *of = output_files[i];
1485 uint64_t total_packets = 0, total_size = 0;
1487 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1488 i, of->ctx->filename);
1490 for (j = 0; j < of->ctx->nb_streams; j++) {
1491 OutputStream *ost = output_streams[of->ost_index + j];
1492 enum AVMediaType type = ost->enc_ctx->codec_type;
1494 total_size += ost->data_size;
1495 total_packets += ost->packets_written;
1497 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1498 i, j, media_type_string(type));
1499 if (ost->encoding_needed) {
1500 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1501 ost->frames_encoded);
1502 if (type == AVMEDIA_TYPE_AUDIO)
1503 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1504 av_log(NULL, AV_LOG_VERBOSE, "; ");
1507 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1508 ost->packets_written, ost->data_size);
1510 av_log(NULL, AV_LOG_VERBOSE, "\n");
1513 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1514 total_packets, total_size);
/* warn loudly if absolutely nothing was produced */
1516 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1517 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1519 av_log(NULL, AV_LOG_WARNING, "\n");
1521 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * print_report(): build and emit the periodic one-line progress report
 * ("frame= ... fps= ... size= ... time= ... bitrate= ...") and, when
 * -progress is active, a machine-readable key=value block written to
 * progress_avio. Rate-limited to one update per 500ms unless this is
 * the final report, in which case print_final_stats() is also called.
 * NOTE(review): numbering gaps — several brace/else lines, the 'buf'
 * declaration and some qp-histogram statements are not visible here;
 * static locals (last_time, qp_histogram) make this non-reentrant.
 */
1526 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1529 AVBPrint buf_script;
1531 AVFormatContext *oc;
1533 AVCodecContext *enc;
1534 int frame_number, vid, i;
1536 int64_t pts = INT64_MIN + 1;
1537 static int64_t last_time = -1;
1538 static int qp_histogram[52];
1539 int hours, mins, secs, us;
/* nothing to do if neither -stats nor -progress asked for reports */
1541 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: at most one intermediate report every 500ms */
1544 if (!is_last_report) {
1545 if (last_time == -1) {
1546 last_time = cur_time;
1549 if ((cur_time - last_time) < 500000)
1551 last_time = cur_time;
1555 oc = output_files[0]->ctx;
1557 total_size = avio_size(oc->pb);
1558 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1559 total_size = avio_tell(oc->pb);
1563 av_bprint_init(&buf_script, 0, 1);
1564 for (i = 0; i < nb_output_streams; i++) {
1566 ost = output_streams[i];
1568 if (!ost->stream_copy)
1569 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream was already reported; only the
 * first video stream contributes frame/fps/q to the status line */
1571 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1572 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1573 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1574 ost->file_index, ost->index, q);
1576 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1577 float fps, t = (cur_time-timer_start) / 1000000.0;
1579 frame_number = ost->frame_number;
1580 fps = t > 1 ? frame_number / t : 0;
/* 'fps < 9.95' picks one decimal only for small rates */
1581 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1582 frame_number, fps < 9.95, fps, q);
1583 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1584 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1585 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1586 ost->file_index, ost->index, q);
1588 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* log-scaled per-qp histogram, printed as one hex digit per bucket */
1592 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1594 for (j = 0; j < 32; j++)
1595 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1598 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1600 double error, error_sum = 0;
1601 double scale, scale_sum = 0;
1603 char type[3] = { 'Y','U','V' };
1604 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1605 for (j = 0; j < 3; j++) {
/* final report: cumulative encoder error over all frames;
 * intermediate: last frame's per-plane error only */
1606 if (is_last_report) {
1607 error = enc->error[j];
1608 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1610 error = ost->error[j];
1611 scale = enc->width * enc->height * 255.0 * 255.0;
1617 p = psnr(error / scale);
1618 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1619 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1620 ost->file_index, ost->index, type[j] | 32, p);
1622 p = psnr(error_sum / scale_sum);
1623 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1624 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1625 ost->file_index, ost->index, p);
1629 /* compute min output value */
1630 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1631 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1632 ost->st->time_base, AV_TIME_BASE_Q));
1634 nb_frames_drop += ost->last_dropped;
/* split |pts| (in AV_TIME_BASE units) into h:m:s.us for display */
1637 secs = FFABS(pts) / AV_TIME_BASE;
1638 us = FFABS(pts) % AV_TIME_BASE;
1644 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1646 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1648 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1649 "size=%8.0fkB time=", total_size / 1024.0);
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1653 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1654 (100 * us) / AV_TIME_BASE);
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1658 av_bprintf(&buf_script, "bitrate=N/A\n");
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1661 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1664 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1665 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1666 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1667 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1668 hours, mins, secs, us);
1670 if (nb_frames_dup || nb_frames_drop)
1671 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1672 nb_frames_dup, nb_frames_drop);
1673 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1674 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* human-readable status line: '\r' overwrites in place, '\n' on last */
1676 if (print_stats || is_last_report) {
1677 const char end = is_last_report ? '\n' : '\r';
1678 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1679 fprintf(stderr, "%s %c", buf, end);
1681 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* machine-readable -progress output */
1686 if (progress_avio) {
1687 av_bprintf(&buf_script, "progress=%s\n",
1688 is_last_report ? "end" : "continue");
1689 avio_write(progress_avio, buf_script.str,
1690 FFMIN(buf_script.len, buf_script.size - 1));
1691 avio_flush(progress_avio);
1692 av_bprint_finalize(&buf_script, NULL);
1693 if (is_last_report) {
1694 avio_closep(&progress_avio);
1699 print_final_stats(total_size);
/*
 * flush_encoders(): drain every encoder at end of stream by repeatedly
 * calling it with a NULL input frame until it stops producing packets,
 * writing each drained packet to the muxer. Skips streams that are not
 * being encoded, audio encoders without frame buffering, and (legacy)
 * rawpicture video passthrough.
 * NOTE(review): numbering gaps — the drain loop framing, 'pkt'/'desc'
 * declarations, stop_encoding handling and several 'continue'/'break'
 * statements are not visible in this excerpt.
 */
1702 static void flush_encoders(void)
1706 for (i = 0; i < nb_output_streams; i++) {
1707 OutputStream *ost = output_streams[i];
1708 AVCodecContext *enc = ost->enc_ctx;
1709 AVFormatContext *os = output_files[ost->file_index]->ctx;
1710 int stop_encoding = 0;
1712 if (!ost->encoding_needed)
/* frame_size <= 1 audio encoders buffer nothing, so no flush needed */
1715 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1717 #if FF_API_LAVF_FMT_RAWPICTURE
1718 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the media-appropriate encode entry point */
1723 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1726 switch (enc->codec_type) {
1727 case AVMEDIA_TYPE_AUDIO:
1728 encode = avcodec_encode_audio2;
1731 case AVMEDIA_TYPE_VIDEO:
1732 encode = avcodec_encode_video2;
1743 av_init_packet(&pkt);
1747 update_benchmark(NULL);
/* NULL frame signals the encoder to emit any buffered packets */
1748 ret = encode(enc, &pkt, NULL, &got_packet);
1749 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1751 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats log, same as the regular encode path */
1756 if (ost->logfile && enc->stats_out) {
1757 fprintf(ost->logfile, "%s", enc->stats_out);
1763 if (ost->finished & MUXER_FINISHED) {
1764 av_packet_unref(&pkt);
1767 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1768 pkt_size = pkt.size;
1769 write_frame(os, &pkt, ost);
1770 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1771 do_video_stats(ost, pkt_size);
1782 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns nonzero when the packet should be written now, 0 to drop it.
 * NOTE(review): numbering gaps — the early 'return 0'/'return 1' lines
 * are not visible in this excerpt.
 */
1784 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1786 OutputFile *of = output_files[ost->file_index];
1787 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* the packet must originate from this ost's mapped input stream */
1789 if (ost->source_index != ist_index)
/* drop packets that precede the requested output start time */
1795 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * do_streamcopy(): forward one input packet to an output stream in copy
 * (-c copy) mode: apply start-time / recording-time limits, rescale
 * timestamps into the output stream's timebase, optionally run the
 * bitstream through av_parser_change() for codecs that need it, then
 * hand the packet to write_frame().
 * NOTE(review): numbering gaps — 'opkt'/'pict' declarations, several
 * 'return' statements and closing braces are not visible here. Note the
 * uses of the deprecated ost->st->codec context below (upstream later
 * migrated these to codecpar).
 */
1801 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1803 OutputFile *of = output_files[ost->file_index];
1804 InputFile *f = input_files [ist->file_index];
1805 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1806 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1810 av_init_packet(&opkt);
/* don't start a copied stream on a non-keyframe unless explicitly allowed */
1812 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1813 !ost->copy_initial_nonkeyframes)
1816 if (!ost->frame_number && !ost->copy_prior_start) {
1817 int64_t comp_start = start_time;
1818 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1819 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1820 if (pkt->pts == AV_NOPTS_VALUE ?
1821 ist->pts < comp_start :
1822 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the requested output duration is reached */
1826 if (of->recording_time != INT64_MAX &&
1827 ist->pts >= of->recording_time + start_time) {
1828 close_output_stream(ost);
/* same, but against the input file's own -t limit */
1832 if (f->recording_time != INT64_MAX) {
1833 start_time = f->ctx->start_time;
1834 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1835 start_time += f->start_time;
1836 if (ist->pts >= f->recording_time + start_time) {
1837 close_output_stream(ost);
1842 /* force the input stream PTS */
1843 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale timestamps from input to output timebase, offset by start time */
1846 if (pkt->pts != AV_NOPTS_VALUE)
1847 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1849 opkt.pts = AV_NOPTS_VALUE;
1851 if (pkt->dts == AV_NOPTS_VALUE)
1852 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1854 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1855 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps to avoid rounding drift */
1857 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1858 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1860 duration = ist->dec_ctx->frame_size;
1861 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1862 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1863 ost->st->time_base) - ost_tb_start_time;
1866 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1867 opkt.flags = pkt->flags;
1868 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1869 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1870 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1871 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1872 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1874 int ret = av_parser_change(ost->parser, ost->st->codec,
1875 &opkt.data, &opkt.size,
1876 pkt->data, pkt->size,
1877 pkt->flags & AV_PKT_FLAG_KEY);
1879 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* the parser allocated a new buffer; give opkt ownership of it */
1884 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1889 opkt.data = pkt->data;
1890 opkt.size = pkt->size;
1892 av_copy_packet_side_data(&opkt, pkt);
1894 #if FF_API_LAVF_FMT_RAWPICTURE
1895 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1896 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1897 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1898 /* store AVPicture in AVPacket, as expected by the output format */
1899 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1901 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1905 opkt.data = (uint8_t *)&pict;
1906 opkt.size = sizeof(AVPicture);
1907 opkt.flags |= AV_PKT_FLAG_KEY;
1911 write_frame(of->ctx, &opkt, ost);
/*
 * guess_input_channel_layout(): if the decoder did not report a channel
 * layout, pick the default layout for its channel count (bounded by
 * -guess_layout_max) and warn that it was guessed.
 * Returns nonzero on success (layout present or guessed), 0 on failure.
 * NOTE(review): numbering gaps — the 'return 0' / 'return 1' lines are
 * not visible in this excerpt.
 */
1914 int guess_input_channel_layout(InputStream *ist)
1916 AVCodecContext *dec = ist->dec_ctx;
1918 if (!dec->channel_layout) {
1919 char layout_name[256];
/* too many channels to guess a layout for — give up */
1921 if (dec->channels > ist->guess_layout_max)
1923 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1924 if (!dec->channel_layout)
1926 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927 dec->channels, dec->channel_layout);
1928 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1929 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * check_decode_result(): book-keeping and policy after a decode call —
 * count successes/failures in decode_error_stat[], and when
 * -xerror (exit_on_error) is set, treat decode errors and corrupt
 * decoded frames as fatal.
 * @param ist  may be NULL (e.g. for subtitles) — then the corrupt-frame
 *             check is skipped
 * NOTE(review): numbering gaps — the exit_program() calls and closing
 * braces are not visible in this excerpt.
 */
1934 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* index 0 counts successful decodes, index 1 counts errors */
1936 if (*got_output || ret<0)
1937 decode_error_stat[ret<0] ++;
1939 if (ret < 0 && exit_on_error)
1942 if (exit_on_error && *got_output && ist) {
1943 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1944 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * decode_audio(): decode one audio packet for 'ist', advance the
 * stream's timestamp bookkeeping, reconfigure filtergraphs when the
 * audio parameters (rate/format/channels/layout) change mid-stream,
 * pick the best available pts source for the decoded frame, and push
 * the frame into every filter this input stream feeds.
 * Returns the decoder's return value, or a negative error from the
 * filter-injection path. *got_output is set by the decoder.
 * NOTE(review): numbering gaps — several early returns, brace lines
 * and 'else' branches are not visible in this excerpt.
 */
1950 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1952 AVFrame *decoded_frame, *f;
1953 AVCodecContext *avctx = ist->dec_ctx;
1954 int i, ret, err = 0, resample_changed;
1955 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1957 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1958 return AVERROR(ENOMEM);
1959 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1960 return AVERROR(ENOMEM);
1961 decoded_frame = ist->decoded_frame;
1963 update_benchmark(NULL);
1964 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1965 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1967 if (ret >= 0 && avctx->sample_rate <= 0) {
1968 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1969 ret = AVERROR_INVALIDDATA;
1972 check_decode_result(ist, got_output, ret);
1974 if (!*got_output || ret < 0)
1977 ist->samples_decoded += decoded_frame->nb_samples;
1978 ist->frames_decoded++;
1981 /* increment next_dts to use for the case where the input stream does not
1982 have timestamps or there are multiple frames in the packet */
1983 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1985 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream parameter changes that require filter reconfiguration */
1989 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1990 ist->resample_channels != avctx->channels ||
1991 ist->resample_channel_layout != decoded_frame->channel_layout ||
1992 ist->resample_sample_rate != decoded_frame->sample_rate;
1993 if (resample_changed) {
1994 char layout1[64], layout2[64];
1996 if (!guess_input_channel_layout(ist)) {
1997 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1998 "layout for Input Stream #%d.%d\n", ist->file_index,
2002 decoded_frame->channel_layout = avctx->channel_layout;
2004 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2005 ist->resample_channel_layout);
2006 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2007 decoded_frame->channel_layout);
2009 av_log(NULL, AV_LOG_INFO,
2010 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2011 ist->file_index, ist->st->index,
2012 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2013 ist->resample_channels, layout1,
2014 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2015 avctx->channels, layout2);
/* remember the new parameters, then rebuild the affected filtergraphs */
2017 ist->resample_sample_fmt = decoded_frame->format;
2018 ist->resample_sample_rate = decoded_frame->sample_rate;
2019 ist->resample_channel_layout = decoded_frame->channel_layout;
2020 ist->resample_channels = avctx->channels;
2022 for (i = 0; i < nb_filtergraphs; i++)
2023 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2024 FilterGraph *fg = filtergraphs[i];
2025 if (configure_filtergraph(fg) < 0) {
2026 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2032 /* if the decoder provides a pts, use it instead of the last packet pts.
2033 the decoder could be delaying output by a packet or more. */
2034 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2035 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2036 decoded_frame_tb = avctx->time_base;
2037 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2038 decoded_frame->pts = decoded_frame->pkt_pts;
2039 decoded_frame_tb = ist->st->time_base;
2040 } else if (pkt->pts != AV_NOPTS_VALUE) {
2041 decoded_frame->pts = pkt->pts;
2042 decoded_frame_tb = ist->st->time_base;
/* last resort: fall back to the stream's running dts estimate */
2044 decoded_frame->pts = ist->dts;
2045 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so multi-frame packets don't reuse it */
2047 pkt->pts = AV_NOPTS_VALUE;
2048 if (decoded_frame->pts != AV_NOPTS_VALUE)
2049 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2050 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2051 (AVRational){1, avctx->sample_rate});
2052 ist->nb_samples = decoded_frame->nb_samples;
/* feed every filter; all but the last get a ref, the last gets the frame */
2053 for (i = 0; i < ist->nb_filters; i++) {
2054 if (i < ist->nb_filters - 1) {
2055 f = ist->filter_frame;
2056 err = av_frame_ref(f, decoded_frame);
2061 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2062 AV_BUFFERSRC_FLAG_PUSH);
2063 if (err == AVERROR_EOF)
2064 err = 0; /* ignore */
2068 decoded_frame->pts = AV_NOPTS_VALUE;
2070 av_frame_unref(ist->filter_frame);
2071 av_frame_unref(decoded_frame);
2072 return err < 0 ? err : ret;
/*
 * decode_video(): decode one video packet for 'ist', work around
 * has_b_frames underestimation, retrieve hardware-accelerated frames,
 * derive the frame timestamp from the best-effort estimate, reconfigure
 * filtergraphs on mid-stream size/format changes, and push the decoded
 * frame into every filter this input stream feeds.
 * Returns the decoder's return value, or a negative error from the
 * filter-injection path. *got_output is set by the decoder.
 * NOTE(review): numbering gaps — several early returns, brace lines and
 * debug-guard conditions are not visible in this excerpt.
 */
2075 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2077 AVFrame *decoded_frame, *f;
2078 int i, ret = 0, err = 0, resample_changed;
2079 int64_t best_effort_timestamp;
2080 AVRational *frame_sample_aspect;
2082 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2083 return AVERROR(ENOMEM);
2084 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2085 return AVERROR(ENOMEM);
2086 decoded_frame = ist->decoded_frame;
/* give the decoder the stream's running dts (rescaled) as packet dts */
2087 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2089 update_benchmark(NULL);
2090 ret = avcodec_decode_video2(ist->dec_ctx,
2091 decoded_frame, got_output, pkt);
2092 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2094 // The following line may be required in some cases where there is no parser
2095 // or the parser does not has_b_frames correctly
2096 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2097 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2098 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2100 av_log(ist->dec_ctx, AV_LOG_WARNING,
2101 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2102 "If you want to help, upload a sample "
2103 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2104 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2105 ist->dec_ctx->has_b_frames,
2106 ist->st->codec->has_b_frames);
2109 check_decode_result(ist, got_output, ret);
/* debug aid: report when the frame disagrees with the codec context */
2111 if (*got_output && ret >= 0) {
2112 if (ist->dec_ctx->width != decoded_frame->width ||
2113 ist->dec_ctx->height != decoded_frame->height ||
2114 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2115 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2116 decoded_frame->width,
2117 decoded_frame->height,
2118 decoded_frame->format,
2119 ist->dec_ctx->width,
2120 ist->dec_ctx->height,
2121 ist->dec_ctx->pix_fmt);
2125 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top option) */
2128 if(ist->top_field_first>=0)
2129 decoded_frame->top_field_first = ist->top_field_first;
2131 ist->frames_decoded++;
/* copy a hwaccel surface back to system memory when required */
2133 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2134 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2138 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2140 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2141 if(best_effort_timestamp != AV_NOPTS_VALUE)
2142 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2145 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2146 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2147 ist->st->index, av_ts2str(decoded_frame->pts),
2148 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2149 best_effort_timestamp,
2150 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2151 decoded_frame->key_frame, decoded_frame->pict_type,
2152 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the frame's own value */
2157 if (ist->st->sample_aspect_ratio.num)
2158 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/pixel-format changes needing filter rebuild */
2160 resample_changed = ist->resample_width != decoded_frame->width ||
2161 ist->resample_height != decoded_frame->height ||
2162 ist->resample_pix_fmt != decoded_frame->format;
2163 if (resample_changed) {
2164 av_log(NULL, AV_LOG_INFO,
2165 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2166 ist->file_index, ist->st->index,
2167 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2168 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2170 ist->resample_width = decoded_frame->width;
2171 ist->resample_height = decoded_frame->height;
2172 ist->resample_pix_fmt = decoded_frame->format;
2174 for (i = 0; i < nb_filtergraphs; i++) {
2175 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2176 configure_filtergraph(filtergraphs[i]) < 0) {
2177 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2183 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* feed every filter; all but the last get a ref, the last gets the frame */
2184 for (i = 0; i < ist->nb_filters; i++) {
2185 if (!frame_sample_aspect->num)
2186 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2188 if (i < ist->nb_filters - 1) {
2189 f = ist->filter_frame;
2190 err = av_frame_ref(f, decoded_frame);
2195 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2196 if (ret == AVERROR_EOF) {
2197 ret = 0; /* ignore */
2198 } else if (ret < 0) {
2199 av_log(NULL, AV_LOG_FATAL,
2200 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2206 av_frame_unref(ist->filter_frame);
2207 av_frame_unref(decoded_frame);
2208 return err < 0 ? err : ret;
/*
 * transcode_subtitles(): decode one subtitle packet, optionally fix up
 * the previous subtitle's duration (-fix_sub_duration delays output by
 * one event so the end time can be clamped to the next event's start),
 * update the sub2video rendering, and send the subtitle to every
 * matching subtitle encoder.
 * NOTE(review): numbering gaps — some brace/return lines and the 'end'
 * declaration are not visible in this excerpt.
 */
2211 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2213 AVSubtitle subtitle;
2214 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2215 &subtitle, got_output, pkt);
2217 check_decode_result(NULL, got_output, ret);
2219 if (ret < 0 || !*got_output) {
2221 sub2video_flush(ist);
2225 if (ist->fix_sub_duration) {
2227 if (ist->prev_sub.got_output) {
/* clamp the previous subtitle so it ends when this one starts (ms) */
2228 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2229 1000, AV_TIME_BASE);
2230 if (end < ist->prev_sub.subtitle.end_display_time) {
2231 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2232 "Subtitle duration reduced from %d to %d%s\n",
2233 ist->prev_sub.subtitle.end_display_time, end,
2234 end <= 0 ? ", dropping it" : "");
2235 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and buffered subtitle: output lags by one event */
2238 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2239 FFSWAP(int, ret, ist->prev_sub.ret);
2240 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2248 sub2video_update(ist, &subtitle);
2250 if (!subtitle.num_rects)
2253 ist->frames_decoded++;
/* fan out to every output stream encoding subtitles from this input */
2255 for (i = 0; i < nb_output_streams; i++) {
2256 OutputStream *ost = output_streams[i];
2258 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2259 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2262 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2266 avsubtitle_free(&subtitle);
/*
 * send_filter_eof(): push a NULL frame (EOF marker) into every buffer
 * source fed by this input stream, so the filtergraphs can flush.
 * NOTE(review): numbering gaps — declarations, the error check on 'ret'
 * and the final return are not visible in this excerpt.
 */
2270 static int send_filter_eof(InputStream *ist)
2273 for (i = 0; i < ist->nb_filters; i++) {
2274 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2281 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2282 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2288 if (!ist->saw_first_ts) {
2289 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2292 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2293 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2295 ist->saw_first_ts = 1;
2298 if (ist->next_dts == AV_NOPTS_VALUE)
2299 ist->next_dts = ist->dts;
2300 if (ist->next_pts == AV_NOPTS_VALUE)
2301 ist->next_pts = ist->pts;
2305 av_init_packet(&avpkt);
2313 if (pkt->dts != AV_NOPTS_VALUE) {
2314 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2315 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2316 ist->next_pts = ist->pts = ist->dts;
2319 // while we have more to decode or while the decoder did output something on EOF
2320 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2324 ist->pts = ist->next_pts;
2325 ist->dts = ist->next_dts;
2327 if (avpkt.size && avpkt.size != pkt->size &&
2328 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2329 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2330 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2331 ist->showed_multi_packet_warning = 1;
2334 switch (ist->dec_ctx->codec_type) {
2335 case AVMEDIA_TYPE_AUDIO:
2336 ret = decode_audio (ist, &avpkt, &got_output);
2338 case AVMEDIA_TYPE_VIDEO:
2339 ret = decode_video (ist, &avpkt, &got_output);
2340 if (avpkt.duration) {
2341 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2342 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2343 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2344 duration = ((int64_t)AV_TIME_BASE *
2345 ist->dec_ctx->framerate.den * ticks) /
2346 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2350 if(ist->dts != AV_NOPTS_VALUE && duration) {
2351 ist->next_dts += duration;
2353 ist->next_dts = AV_NOPTS_VALUE;
2356 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2358 case AVMEDIA_TYPE_SUBTITLE:
2359 ret = transcode_subtitles(ist, &avpkt, &got_output);
2366 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2367 ist->file_index, ist->st->index, av_err2str(ret));
2374 avpkt.pts= AV_NOPTS_VALUE;
2376 // touch data and size only if not EOF
2378 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2386 if (got_output && !pkt)
2390 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2391 /* except when looping we need to flush but not to send an EOF */
2392 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2393 int ret = send_filter_eof(ist);
2395 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2400 /* handle stream copy */
2401 if (!ist->decoding_needed) {
2402 ist->dts = ist->next_dts;
2403 switch (ist->dec_ctx->codec_type) {
2404 case AVMEDIA_TYPE_AUDIO:
2405 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2406 ist->dec_ctx->sample_rate;
2408 case AVMEDIA_TYPE_VIDEO:
2409 if (ist->framerate.num) {
2410 // TODO: Remove work-around for c99-to-c89 issue 7
2411 AVRational time_base_q = AV_TIME_BASE_Q;
2412 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2413 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2414 } else if (pkt->duration) {
2415 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2416 } else if(ist->dec_ctx->framerate.num != 0) {
2417 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2418 ist->next_dts += ((int64_t)AV_TIME_BASE *
2419 ist->dec_ctx->framerate.den * ticks) /
2420 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2424 ist->pts = ist->dts;
2425 ist->next_pts = ist->next_dts;
2427 for (i = 0; pkt && i < nb_output_streams; i++) {
2428 OutputStream *ost = output_streams[i];
2430 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2433 do_streamcopy(ist, ost, pkt);
/* Build an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to -sdp_file.  NOTE(review): error-handling lines
 * are elided in this excerpt. */
2439 static void print_sdp(void)
2444 AVIOContext *sdp_pb;
2445 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the "rtp" outputs; j counts how many were found. */
2449 for (i = 0, j = 0; i < nb_output_files; i++) {
2450 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2451 avc[j] = output_files[i]->ctx;
2459 av_sdp_create(avc, j, sdp, sizeof(sdp));
2461 if (!sdp_filename) {
2462 printf("SDP:\n%s\n", sdp);
2465 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2466 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2468 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2469 avio_closep(&sdp_pb);
/* The filename is only needed once; release it after writing. */
2470 av_freep(&sdp_filename);
/* Look up the hwaccel table entry matching a hardware pixel format.
 * NOTE(review): the fall-through return for "no match" is elided here. */
2478 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2481 for (i = 0; hwaccels[i].name; i++)
2482 if (hwaccels[i].pix_fmt == pix_fmt)
2483 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format whose hwaccel can actually be
 * initialized (respecting the user's -hwaccel selection); software formats
 * fall through.  NOTE(review): some branch/return lines are elided. */
2487 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2489 InputStream *ist = s->opaque;
2490 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2493 for (p = pix_fmts; *p != -1; p++) {
2494 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2495 const HWAccel *hwaccel;
/* Non-hwaccel formats are acceptable as-is. */
2497 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2500 hwaccel = get_hwaccel(*p);
/* Skip hwaccels that conflict with one already active on this stream or
 * with an explicit user request. */
2502 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2503 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2506 ret = hwaccel->init(s);
/* Init failure is fatal only if this hwaccel was explicitly requested. */
2508 if (ist->hwaccel_id == hwaccel->id) {
2509 av_log(NULL, AV_LOG_FATAL,
2510 "%s hwaccel requested for input stream #%d:%d, "
2511 "but cannot be initialized.\n", hwaccel->name,
2512 ist->file_index, ist->st->index);
2513 return AV_PIX_FMT_NONE;
2517 ist->active_hwaccel_id = hwaccel->id;
2518 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel for its pixel format, otherwise use the default allocator. */
2525 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2527 InputStream *ist = s->opaque;
2529 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2530 return ist->hwaccel_get_buffer(s, frame, flags);
2532 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed) and reset its
 * timestamp predictors.  On failure a human-readable message is written into
 * error/error_len and a negative AVERROR is returned.
 * NOTE(review): the "codec == NULL" check preceding the error message is
 * elided in this excerpt. */
2535 static int init_input_stream(int ist_index, char *error, int error_len)
2538 InputStream *ist = input_streams[ist_index];
2540 if (ist->decoding_needed) {
2541 AVCodec *codec = ist->dec;
2543 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2544 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2545 return AVERROR(EINVAL);
/* Install our callbacks so get_format/get_buffer can reach the stream. */
2548 ist->dec_ctx->opaque = ist;
2549 ist->dec_ctx->get_format = get_format;
2550 ist->dec_ctx->get_buffer2 = get_buffer;
2551 ist->dec_ctx->thread_safe_callbacks = 1;
2553 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles feeding an output need end-display-time computation. */
2554 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2555 (ist->decoding_needed & DECODING_FOR_OST)) {
2556 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2557 if (ist->decoding_needed & DECODING_FOR_FILTER)
2558 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user set one. */
2561 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2562 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2563 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2564 if (ret == AVERROR_EXPERIMENTAL)
2565 abort_codec_experimental(codec, 0);
2567 snprintf(error, error_len,
2568 "Error while opening decoder for input stream "
2570 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left over were not consumed -> user error. */
2573 assert_avoptions(ist->decoder_opts);
2576 ist->next_pts = AV_NOPTS_VALUE;
2577 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream is mapped from, or (in the elided
 * fall-through) NULL for outputs with no direct source (e.g. complex-filter
 * outputs, which have source_index < 0). */
2582 static InputStream *get_input_stream(OutputStream *ost)
2584 if (ost->source_index >= 0)
2585 return input_streams[ost->source_index];
/* qsort comparator for int64_t values (overflow-safe sign of a - b). */
2589 static int compare_int64(const void *a, const void *b)
2591 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Open the encoder for one output stream: propagate the decoder's subtitle
 * header, fill default options (threads, audio bitrate), open the codec, and
 * sync the muxer-visible stream context/timebase with the encoder context.
 * On failure writes a message to error/error_len and returns negative.
 * NOTE(review): several lines (dec assignment, returns, closing braces) are
 * elided in this excerpt. */
2594 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2598 if (ost->encoding_needed) {
2599 AVCodec *codec = ost->enc;
2600 AVCodecContext *dec = NULL;
2603 if ((ist = get_input_stream(ost)))
2605 if (dec && dec->subtitle_header) {
2606 /* ASS code assumes this buffer is null terminated so add extra byte. */
2607 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2608 if (!ost->enc_ctx->subtitle_header)
2609 return AVERROR(ENOMEM);
2610 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2611 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default thread count and a 128 kb/s audio bitrate when unspecified. */
2613 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2614 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2615 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2617 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2618 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2619 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2621 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2622 if (ret == AVERROR_EXPERIMENTAL)
2623 abort_codec_experimental(codec, 1);
2624 snprintf(error, error_len,
2625 "Error while opening encoder for output stream #%d:%d - "
2626 "maybe incorrect parameters such as bit_rate, rate, width or height",
2627 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly frame_size samples. */
2630 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2631 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2632 av_buffersink_set_frame_size(ost->filter->filter,
2633 ost->enc_ctx->frame_size);
2634 assert_avoptions(ost->encoder_opts);
2635 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2636 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2637 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the (now-open) encoder context into the deprecated st->codec so
 * the muxer sees the final parameters. */
2639 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2641 av_log(NULL, AV_LOG_FATAL,
2642 "Error initializing the output stream codec context.\n");
2646 // copy timebase while removing common factors
2647 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2648 ost->st->codec->codec= ost->enc_ctx->codec;
/* Non-encoding (stream-copy) path, per the elided else branch context:
 * apply leftover options and copy the timebase from st->codec instead. */
2650 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2652 av_log(NULL, AV_LOG_FATAL,
2653 "Error setting up codec context options.\n");
2656 // copy timebase while removing common factors
2657 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse the -force_key_frames argument: a comma-separated list of times
 * and/or "chapters[+offset]" entries.  Fills ost->forced_kf_pts (sorted,
 * in avctx->time_base units) and ost->forced_kf_count.
 * NOTE(review): lines counting commas into n and advancing p are elided. */
2663 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2664 AVCodecContext *avctx)
2667 int n = 1, i, size, index = 0;
/* First pass (partially elided): count entries by counting commas. */
2670 for (p = kf; *p; p++)
2674 pts = av_malloc_array(size, sizeof(*pts));
2676 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2681 for (i = 0; i < n; i++) {
2682 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one keyframe per chapter start. */
2687 if (!memcmp(p, "chapters", 8)) {
2689 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1) extra slots, guarding overflow. */
2692 if (avf->nb_chapters > INT_MAX - size ||
2693 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2695 av_log(NULL, AV_LOG_FATAL,
2696 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after the keyword, applied to every chapter. */
2699 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2700 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2702 for (j = 0; j < avf->nb_chapters; j++) {
2703 AVChapter *c = avf->chapters[j];
2704 av_assert1(index < size);
2705 pts[index++] = av_rescale_q(c->start, c->time_base,
2706 avctx->time_base) + t;
/* Plain timestamp entry. */
2711 t = parse_time_or_die("force_key_frames", p, 1);
2712 av_assert1(index < size);
2713 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2720 av_assert0(index == size);
/* Keep the list sorted so the encoder loop can scan it linearly. */
2721 qsort(pts, size, sizeof(*pts), compare_int64);
2722 ost->forced_kf_count = size;
2723 ost->forced_kf_pts = pts;
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe of the input file. */
2726 static void report_new_stream(int input_index, AVPacket *pkt)
2728 InputFile *file = input_files[input_index];
2729 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or a later one): nothing to do. */
2731 if (pkt->stream_index < file->nb_streams_warn)
2733 av_log(file->ctx, AV_LOG_WARNING,
2734 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2735 av_get_media_type_string(st->codec->codec_type),
2736 input_index, pkt->stream_index,
2737 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index reported so far. */
2738 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the per-stream "encoder" metadata tag ("Lavc... <codec>") unless the
 * user already supplied one, honoring bitexact flags by dropping the version
 * string.  NOTE(review): early returns and some braces are elided here. */
2741 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2743 AVDictionaryEntry *e;
2745 uint8_t *encoder_string;
2746 int encoder_string_len;
2747 int format_flags = 0;
2748 int codec_flags = 0;
/* User-provided tag wins; do not overwrite. */
2750 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags strings to detect the bitexact bit; the option
 * dictionaries still hold raw strings at this point. */
2753 e = av_dict_get(of->opts, "fflags", NULL, 0);
2755 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2758 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2760 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2762 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2765 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2768 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2769 encoder_string = av_mallocz(encoder_string_len);
2770 if (!encoder_string)
/* Bitexact output must not embed the variable version string. */
2773 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2774 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2776 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2777 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
2778 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2779 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the main transcode loop: resolve complex-filter
 * output mappings, compute encoding/stream-copy parameters for every output
 * stream, open all encoders and decoders, write output file headers, and log
 * the stream mapping.  Returns 0 on success or a negative AVERROR (error
 * paths jump to an elided "dump_format:" style label that prints 'error').
 * NOTE(review): this excerpt elides many lines (braces, breaks, labels);
 * comments describe only the visible statements. */
2782 static int transcode_init(void)
2784 int ret = 0, i, j, k;
2785 AVFormatContext *oc;
2788 char error[1024] = {0};
/* Give complex-filtergraph outputs with a single input a source_index so
 * later code can treat them like simple mappings. */
2791 for (i = 0; i < nb_filtergraphs; i++) {
2792 FilterGraph *fg = filtergraphs[i];
2793 for (j = 0; j < fg->nb_outputs; j++) {
2794 OutputFilter *ofilter = fg->outputs[j];
2795 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2797 if (fg->nb_inputs != 1)
2799 for (k = nb_input_streams-1; k >= 0 ; k--)
2800 if (fg->inputs[0]->ist == input_streams[k])
2802 ofilter->ost->source_index = k;
2806 /* init framerate emulation */
2807 for (i = 0; i < nb_input_files; i++) {
2808 InputFile *ifile = input_files[i];
2809 if (ifile->rate_emu)
2810 for (j = 0; j < ifile->nb_streams; j++)
2811 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2814 /* for each output stream, we compute the right encoding parameters */
2815 for (i = 0; i < nb_output_streams; i++) {
2816 AVCodecContext *enc_ctx;
2817 AVCodecContext *dec_ctx = NULL;
2818 ost = output_streams[i];
2819 oc = output_files[ost->file_index]->ctx;
2820 ist = get_input_stream(ost);
2822 if (ost->attachment_filename)
/* Stream copy writes directly into the muxer's st->codec context. */
2825 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2828 dec_ctx = ist->dec_ctx;
2830 ost->st->disposition = ist->st->disposition;
2831 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2832 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its type in the file, mark it default. */
2834 for (j=0; j<oc->nb_streams; j++) {
2835 AVStream *st = oc->streams[j];
2836 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2839 if (j == oc->nb_streams)
2840 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2841 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---------------- stream copy parameter propagation ---------------- */
2844 if (ost->stream_copy) {
2846 uint64_t extra_size;
2848 av_assert0(ist && !ost->filter);
/* 64-bit sum guards against overflow before the INT_MAX check. */
2850 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2852 if (extra_size > INT_MAX) {
2853 return AVERROR(EINVAL);
2856 /* if stream_copy is selected, no need to decode or encode */
2857 enc_ctx->codec_id = dec_ctx->codec_id;
2858 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec_tag when the output format either has no tag
 * table, maps the tag to the same codec, or has no tag of its own. */
2860 if (!enc_ctx->codec_tag) {
2861 unsigned int codec_tag;
2862 if (!oc->oformat->codec_tag ||
2863 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2864 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2865 enc_ctx->codec_tag = dec_ctx->codec_tag;
2868 enc_ctx->bit_rate = dec_ctx->bit_rate;
2869 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2870 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2871 enc_ctx->field_order = dec_ctx->field_order;
2872 if (dec_ctx->extradata_size) {
2873 enc_ctx->extradata = av_mallocz(extra_size);
2874 if (!enc_ctx->extradata) {
2875 return AVERROR(ENOMEM);
2877 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2879 enc_ctx->extradata_size= dec_ctx->extradata_size;
2880 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2882 enc_ctx->time_base = ist->st->time_base;
2884 * Avi is a special case here because it supports variable fps but
2885 * having the fps and timebase differe significantly adds quite some
/* Timebase heuristics for copy: AVI gets a doubled-rate timebase with
 * ticks_per_frame=2; other fixed-fps formats (excluding the MOV family)
 * may inherit the codec timebase when the stream timebase is too fine. */
2888 if(!strcmp(oc->oformat->name, "avi")) {
2889 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2890 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2891 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2892 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2894 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2895 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2896 enc_ctx->ticks_per_frame = 2;
2897 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2898 && av_q2d(ist->st->time_base) < 1.0/500
2900 enc_ctx->time_base = dec_ctx->time_base;
2901 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2902 enc_ctx->time_base.den *= 2;
2903 enc_ctx->ticks_per_frame = 2;
2905 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2906 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2907 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2908 && strcmp(oc->oformat->name, "f4v")
2910 if( copy_tb<0 && dec_ctx->time_base.den
2911 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2912 && av_q2d(ist->st->time_base) < 1.0/500
2914 enc_ctx->time_base = dec_ctx->time_base;
2915 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the codec timebase when it looks like a
 * plausible frame rate (between ~1 and 121 fps). */
2918 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2919 && dec_ctx->time_base.num < dec_ctx->time_base.den
2920 && dec_ctx->time_base.num > 0
2921 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2922 enc_ctx->time_base = dec_ctx->time_base;
2925 if (!ost->frame_rate.num)
2926 ost->frame_rate = ist->framerate;
2927 if(ost->frame_rate.num)
2928 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2930 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2931 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy stream side data (skipping a display matrix that the user
 * overrode with -metadata rotate / autorotate handling). */
2933 if (ist->st->nb_side_data) {
2934 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2935 sizeof(*ist->st->side_data));
2936 if (!ost->st->side_data)
2937 return AVERROR(ENOMEM);
2939 ost->st->nb_side_data = 0;
2940 for (j = 0; j < ist->st->nb_side_data; j++) {
2941 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2942 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2944 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2947 sd_dst->data = av_malloc(sd_src->size);
2949 return AVERROR(ENOMEM);
2950 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2951 sd_dst->size = sd_src->size;
2952 sd_dst->type = sd_src->type;
2953 ost->st->nb_side_data++;
2957 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-type parameter copy for stream copy. */
2959 switch (enc_ctx->codec_type) {
2960 case AVMEDIA_TYPE_AUDIO:
2961 if (audio_volume != 256) {
2962 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2965 enc_ctx->channel_layout = dec_ctx->channel_layout;
2966 enc_ctx->sample_rate = dec_ctx->sample_rate;
2967 enc_ctx->channels = dec_ctx->channels;
2968 enc_ctx->frame_size = dec_ctx->frame_size;
2969 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2970 enc_ctx->block_align = dec_ctx->block_align;
2971 enc_ctx->initial_padding = dec_ctx->delay;
2972 enc_ctx->profile = dec_ctx->profile;
2973 #if FF_API_AUDIOENC_DELAY
2974 enc_ctx->delay = dec_ctx->delay;
/* Some containers store bogus block_align for MP3/AC3; clear it. */
2976 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2977 enc_ctx->block_align= 0;
2978 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2979 enc_ctx->block_align= 0;
2981 case AVMEDIA_TYPE_VIDEO:
2982 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2983 enc_ctx->width = dec_ctx->width;
2984 enc_ctx->height = dec_ctx->height;
2985 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2986 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2988 av_mul_q(ost->frame_aspect_ratio,
2989 (AVRational){ enc_ctx->height, enc_ctx->width });
2990 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2991 "with stream copy may produce invalid files\n");
2993 else if (ist->st->sample_aspect_ratio.num)
2994 sar = ist->st->sample_aspect_ratio;
2996 sar = dec_ctx->sample_aspect_ratio;
2997 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2998 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2999 ost->st->r_frame_rate = ist->st->r_frame_rate;
3001 case AVMEDIA_TYPE_SUBTITLE:
3002 enc_ctx->width = dec_ctx->width;
3003 enc_ctx->height = dec_ctx->height;
3005 case AVMEDIA_TYPE_UNKNOWN:
3006 case AVMEDIA_TYPE_DATA:
3007 case AVMEDIA_TYPE_ATTACHMENT:
/* ---------------- encoding path ---------------- */
3014 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3016 /* should only happen when a default codec is not present. */
3017 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3018 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3019 ret = AVERROR(EINVAL);
3023 set_encoder_id(output_files[ost->file_index], ost);
3026 if (qsv_transcode_init(ost))
/* Build an implicit simple filtergraph for unfiltered A/V encodes. */
3031 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3032 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3034 fg = init_simple_filtergraph(ist, ost);
3035 if (configure_filtergraph(fg)) {
3036 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate: filter chain > -r > input r_frame_rate > 25 fps. */
3041 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3042 if (!ost->frame_rate.num)
3043 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3044 if (ist && !ost->frame_rate.num)
3045 ost->frame_rate = ist->framerate;
3046 if (ist && !ost->frame_rate.num)
3047 ost->frame_rate = ist->st->r_frame_rate;
3048 if (ist && !ost->frame_rate.num) {
3049 ost->frame_rate = (AVRational){25, 1};
3050 av_log(NULL, AV_LOG_WARNING,
3052 "about the input framerate is available. Falling "
3053 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3054 "if you want a different framerate.\n",
3055 ost->file_index, ost->index);
3057 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3058 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3059 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3060 ost->frame_rate = ost->enc->supported_framerates[idx];
3062 // reduce frame rate for mpeg4 to be within the spec limits
3063 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3064 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3065 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull the final negotiated parameters out of the filter sink. */
3069 switch (enc_ctx->codec_type) {
3070 case AVMEDIA_TYPE_AUDIO:
3071 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3072 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3073 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3074 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3075 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3077 case AVMEDIA_TYPE_VIDEO:
3078 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3079 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3080 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3081 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3082 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3083 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3084 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE; rescale now that
 * the encoder timebase is known. */
3086 for (j = 0; j < ost->forced_kf_count; j++)
3087 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3089 enc_ctx->time_base);
3091 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3092 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3093 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3094 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3095 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3096 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3097 if (!strncmp(ost->enc->name, "libx264", 7) &&
3098 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3099 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3100 av_log(NULL, AV_LOG_WARNING,
3101 "No pixel format specified, %s for H.264 encoding chosen.\n"
3102 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3103 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3104 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3105 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3106 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3107 av_log(NULL, AV_LOG_WARNING,
3108 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3109 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3110 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3111 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3113 ost->st->avg_frame_rate = ost->frame_rate;
/* Transcoding that changes geometry or pixel format invalidates the
 * copied bits_per_raw_sample; use the user-specified value instead. */
3116 enc_ctx->width != dec_ctx->width ||
3117 enc_ctx->height != dec_ctx->height ||
3118 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3119 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3122 if (ost->forced_keyframes) {
3123 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3124 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3125 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3127 av_log(NULL, AV_LOG_ERROR,
3128 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3131 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3132 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3133 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3136 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3137 // parse it only for static kf timings
3138 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3139 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3143 case AVMEDIA_TYPE_SUBTITLE:
3144 enc_ctx->time_base = (AVRational){1, 1000};
3145 if (!enc_ctx->width) {
3146 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3147 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3150 case AVMEDIA_TYPE_DATA:
/* Translate the -disposition string into AV_DISPOSITION_* bits using a
 * local, throwaway AVOption flags table. */
3158 if (ost->disposition) {
3159 static const AVOption opts[] = {
3160 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3161 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3162 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3163 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3164 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3165 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3166 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3167 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3168 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3169 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3170 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3171 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3172 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3173 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3176 static const AVClass class = {
3178 .item_name = av_default_item_name,
3180 .version = LIBAVUTIL_VERSION_INT,
3182 const AVClass *pclass = &class;
3184 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3190 /* open each encoder */
3191 for (i = 0; i < nb_output_streams; i++) {
3192 ret = init_output_stream(output_streams[i], error, sizeof(error));
3197 /* init input streams */
3198 for (i = 0; i < nb_input_streams; i++)
3199 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On decoder-open failure, close all encoders before bailing out. */
3200 for (i = 0; i < nb_output_streams; i++) {
3201 ost = output_streams[i];
3202 avcodec_close(ost->enc_ctx);
3207 /* discard unused programs */
3208 for (i = 0; i < nb_input_files; i++) {
3209 InputFile *ifile = input_files[i];
3210 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3211 AVProgram *p = ifile->ctx->programs[j];
3212 int discard = AVDISCARD_ALL;
3214 for (k = 0; k < p->nb_stream_indexes; k++)
3215 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3216 discard = AVDISCARD_DEFAULT;
3219 p->discard = discard;
3223 /* open files and write file headers */
3224 for (i = 0; i < nb_output_files; i++) {
3225 oc = output_files[i]->ctx;
3226 oc->interrupt_callback = int_cb;
3227 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3228 snprintf(error, sizeof(error),
3229 "Could not write header for output file #%d "
3230 "(incorrect codec parameters ?): %s",
3231 i, av_err2str(ret));
3232 ret = AVERROR(EINVAL);
3235 // assert_avoptions(output_files[i]->opts);
3236 if (strcmp(oc->oformat->name, "rtp")) {
3242 /* dump the file output parameters - cannot be done before in case
3244 for (i = 0; i < nb_output_files; i++) {
3245 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3248 /* dump the stream mapping */
3249 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3250 for (i = 0; i < nb_input_streams; i++) {
3251 ist = input_streams[i];
3253 for (j = 0; j < ist->nb_filters; j++) {
3254 if (ist->filters[j]->graph->graph_desc) {
3255 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3256 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3257 ist->filters[j]->name);
3258 if (nb_filtergraphs > 1)
3259 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3260 av_log(NULL, AV_LOG_INFO, "\n");
3265 for (i = 0; i < nb_output_streams; i++) {
3266 ost = output_streams[i];
3268 if (ost->attachment_filename) {
3269 /* an attached file */
3270 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3271 ost->attachment_filename, ost->file_index, ost->index);
3275 if (ost->filter && ost->filter->graph->graph_desc) {
3276 /* output from a complex graph */
3277 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3278 if (nb_filtergraphs > 1)
3279 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3281 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3282 ost->index, ost->enc ? ost->enc->name : "?");
3286 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3287 input_streams[ost->source_index]->file_index,
3288 input_streams[ost->source_index]->st->index,
3291 if (ost->sync_ist != input_streams[ost->source_index])
3292 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3293 ost->sync_ist->file_index,
3294 ost->sync_ist->st->index);
3295 if (ost->stream_copy)
3296 av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcodes, show both the codec name and the concrete
 * decoder/encoder implementation ("native" when they coincide). */
3298 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3299 const AVCodec *out_codec = ost->enc;
3300 const char *decoder_name = "?";
3301 const char *in_codec_name = "?";
3302 const char *encoder_name = "?";
3303 const char *out_codec_name = "?";
3304 const AVCodecDescriptor *desc;
3307 decoder_name = in_codec->name;
3308 desc = avcodec_descriptor_get(in_codec->id);
3310 in_codec_name = desc->name;
3311 if (!strcmp(decoder_name, in_codec_name))
3312 decoder_name = "native";
3316 encoder_name = out_codec->name;
3317 desc = avcodec_descriptor_get(out_codec->id);
3319 out_codec_name = desc->name;
3320 if (!strcmp(encoder_name, out_codec_name))
3321 encoder_name = "native";
3324 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3325 in_codec_name, decoder_name,
3326 out_codec_name, encoder_name);
3328 av_log(NULL, AV_LOG_INFO, "\n");
/* Error label path (label itself elided): report accumulated message. */
3332 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3336 if (sdp_filename || want_sdp) {
3340 transcode_init_done = 1;
3345 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3346 static int need_output(void)
3350 for (i = 0; i < nb_output_streams; i++) {
3351 OutputStream *ost = output_streams[i];
3352 OutputFile *of = output_files[ost->file_index];
3353 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are finished or whose file hit -fs (limit_filesize). */
3355 if (ost->finished ||
3356 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Reaching -frames closes every stream of the file, then skip it. */
3358 if (ost->frame_number >= ost->max_frames) {
3360 for (j = 0; j < of->ctx->nb_streams; j++)
3361 close_output_stream(output_streams[of->ost_index + j]);
3372 * Select the output stream to process.
3374 * @return selected output stream, or NULL if none available
3376 static OutputStream *choose_output(void)
3379 int64_t opts_min = INT64_MAX;
3380 OutputStream *ost_min = NULL;
/* Pick the unfinished stream with the smallest current DTS so muxing stays
 * roughly interleaved; streams with no DTS yet sort first (INT64_MIN). */
3382 for (i = 0; i < nb_output_streams; i++) {
3383 OutputStream *ost = output_streams[i];
3384 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3385 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3387 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3388 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n")
3390 if (!ost->finished && opts < opts_min) {
/* An unavailable stream yields NULL so the caller waits instead. */
3392 ost_min = ost->unavailable ? NULL : ost;
/**
 * Enable or disable terminal echo on stdin, used while reading interactive
 * commands so typed characters are (or are not) shown.  A no-op on
 * platforms without termios.
 *
 * NOTE(review): the HAVE_TERMIOS_H guard and the variable declaration were
 * elided in this excerpt and are reconstructed here.
 */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;
    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
/*
 * Poll the keyboard (at most every 100ms) and act on single-key commands:
 * +/- adjust log level, 's' toggles the QP histogram, 'c'/'C' send commands
 * to filters, 'd'/'D' cycle debug modes, '?' prints help.
 * Returns AVERROR_EXIT when the user asked to quit or a signal arrived.
 * NOTE(review): many lines (key read, 'q' handling, braces) are elided in
 * this excerpt; comments cover only the visible code.
 */
3410 static int check_keyboard_interaction(int64_t cur_time)
3413 static int64_t last_time;
/* A pending signal always terminates interaction immediately. */
3414 if (received_nb_signals)
3415 return AVERROR_EXIT;
3416 /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100ms, and never poll in daemon mode. */
3417 if(cur_time - last_time >= 100000 && !run_as_daemon){
3419 last_time = cur_time;
/* Presumably reached for the quit key — the condition is elided here. */
3423 return AVERROR_EXIT;
3424 if (key == '+') av_log_set_level(av_log_get_level()+10);
3425 if (key == '-') av_log_set_level(av_log_get_level()-10);
3426 if (key == 's') qp_hist ^= 1;
/* Cycle packet/hex dump state ('h' key; surrounding lines elided). */
3429 do_hex_dump = do_pkt_dump = 0;
3430 } else if(do_pkt_dump){
3434 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command> [<argument>]" from the tty and
 * forward it to the filtergraphs. */
3436 if (key == 'c' || key == 'C'){
3437 char buf[4096], target[64], command[256], arg[256] = {0};
3440 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3443 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3448 fprintf(stderr, "\n");
3450 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3451 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3452 target, time, command, arg);
3453 for (i = 0; i < nb_filtergraphs; i++) {
3454 FilterGraph *fg = filtergraphs[i];
/* time == -1 (immediate): send now; 'c' limits it to the first matching
 * filter, 'C' sends to all. */
3457 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3458 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3459 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3460 } else if (key == 'c') {
3461 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3462 ret = AVERROR_PATCHWELCOME;
/* Timed command: queue it for execution at the given timestamp. */
3464 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3466 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3471 av_log(NULL, AV_LOG_ERROR,
3472 "Parse error, at least 3 arguments were expected, "
3473 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle to the next supported debug mode; 'D': prompt for a value. */
3476 if (key == 'd' || key == 'D'){
3479 debug = input_streams[0]->st->codec->debug<<1;
3480 if(!debug) debug = 1;
3481 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3488 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3493 fprintf(stderr, "\n");
3494 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3495 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3497 for(i=0;i<nb_input_streams;i++) {
3498 input_streams[i]->st->codec->debug = debug;
3500 for(i=0;i<nb_output_streams;i++) {
3501 OutputStream *ost = output_streams[i];
3502 ost->enc_ctx->debug = debug;
3504 if(debug) av_log_set_level(AV_LOG_DEBUG);
3505 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key — condition elided): print the key help. */
3508 fprintf(stderr, "key function\n"
3509 "? show this help\n"
3510 "+ increase verbosity\n"
3511 "- decrease verbosity\n"
3512 "c Send command to first matching filter supporting it\n"
3513 "C Send/Que command to all matching filters\n"
3514 "D cycle through available debug modes\n"
3515 "h dump packets/hex press to cycle through the 3 states\n"
3517 "s Show QP histogram\n"
/*
 * Per-input-file reader thread: demuxes packets with av_read_frame() and
 * pushes them into the file's thread message queue for the main thread.
 * NOTE(review): loop header, EOF/error branches and braces are elided in
 * this excerpt.
 */
3524 static void *input_thread(void *arg)
/* Non-blocking files use non-blocking sends so the reader never stalls. */
3527 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3532 ret = av_read_frame(f->ctx, &pkt);
3534 if (ret == AVERROR(EAGAIN)) {
/* Record the demux error so the receiving side sees it on its next recv. */
3539 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3542 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: retry (blocking; the flag reset is
 * elided here) and warn that -thread_queue_size may be too small. */
3543 if (flags && ret == AVERROR(EAGAIN)) {
3545 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3546 av_log(f->ctx, AV_LOG_WARNING,
3547 "Thread message queue blocking; consider raising the "
3548 "thread_queue_size option (current value: %d)\n",
3549 f->thread_queue_size);
/* Send failed: drop the packet and propagate the error to the receiver. */
3552 if (ret != AVERROR_EOF)
3553 av_log(f->ctx, AV_LOG_ERROR,
3554 "Unable to send packet to main thread: %s\n",
3556 av_packet_unref(&pkt);
3557 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Stop and reap all input reader threads: signal EOF to the sender side,
 * drain and unref any queued packets, join each thread and free its queue.
 * NOTE(review): braces and a continue statement are elided in this excerpt.
 */
3565 static void free_input_threads(void)
3569 for (i = 0; i < nb_input_files; i++) {
3570 InputFile *f = input_files[i];
/* Skip files that never got a queue (single-input case). */
3573 if (!f || !f->in_thread_queue)
/* Make any blocked send in input_thread() fail with EOF... */
3575 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* ...then drain packets still in flight so nothing leaks. */
3576 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3577 av_packet_unref(&pkt);
3579 pthread_join(f->thread, NULL);
3581 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Spawn one reader thread per input file (only when there are at least two
 * input files) with a message queue of thread_queue_size packets.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): braces and early returns are elided in this excerpt.
 */
3585 static int init_input_threads(void)
/* A single input file is read directly on the main thread. */
3589 if (nb_input_files == 1)
3592 for (i = 0; i < nb_input_files; i++) {
3593 InputFile *f = input_files[i];
/* Non-seekable inputs (pipes, devices) other than lavfi must not block
 * the reader, or one stalled input could starve the others. */
3595 if (f->ctx->pb ? !f->ctx->pb->seekable :
3596 strcmp(f->ctx->iformat->name, "lavfi"))
3597 f->non_blocking = 1;
3598 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3599 f->thread_queue_size, sizeof(AVPacket));
3603 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3604 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3605 av_thread_message_queue_free(&f->in_thread_queue);
3606 return AVERROR(ret);
3612 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3614 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3616 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Read the next packet for input file f: via the reader-thread queue when
 * several input files exist, directly from the demuxer otherwise.
 * NOTE(review): the guard around the pacing loop below is elided in this
 * excerpt — presumably -re (rate emulation); confirm against full source.
 */
3620 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Pacing: hold packets back until their dts (in wall-clock microseconds)
 * has actually elapsed since the stream started. */
3624 for (i = 0; i < f->nb_streams; i++) {
3625 InputStream *ist = input_streams[f->ist_index + i];
3626 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3627 int64_t now = av_gettime_relative() - ist->start;
3629 return AVERROR(EAGAIN);
/* Multiple inputs: packets come from the per-file reader thread. */
3634 if (nb_input_files > 1)
3635 return get_input_packet_mt(f, pkt);
3637 return av_read_frame(f->ctx, pkt);
3640 static int got_eagain(void)
3643 for (i = 0; i < nb_output_streams; i++)
3644 if (output_streams[i]->unavailable)
3649 static void reset_eagain(void)
3652 for (i = 0; i < nb_input_files; i++)
3653 input_files[i]->eagain = 0;
3654 for (i = 0; i < nb_output_streams; i++)
3655 output_streams[i]->unavailable = 0;
3658 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3659 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3660 AVRational time_base)
3666 return tmp_time_base;
3669 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3672 return tmp_time_base;
/*
 * Rewind input file ifile to its start (for -stream_loop): seek, flush all
 * decoders, then estimate the file's total duration so the next loop's
 * timestamps can be offset past the previous pass.
 * NOTE(review): braces, continue statements and the return are elided in
 * this excerpt; comments cover only the visible code.
 */
3678 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3681 AVCodecContext *avctx;
3682 int i, ret, has_audio = 0;
3683 int64_t duration = 0;
3685 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush every active decoder and note if any audio stream has
 * decoded samples (audio gives an exact last-frame duration). */
3689 for (i = 0; i < ifile->nb_streams; i++) {
3690 ist = input_streams[ifile->ist_index + i];
3691 avctx = ist->dec_ctx;
3694 if (ist->decoding_needed) {
3695 process_input_packet(ist, NULL, 1);
3696 avcodec_flush_buffers(avctx);
3699 /* duration is the length of the last frame in a stream
3700 * when audio stream is present we don't care about
3701 * last video frame length because it's not defined exactly */
3702 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's last-frame duration ... */
3706 for (i = 0; i < ifile->nb_streams; i++) {
3707 ist = input_streams[ifile->ist_index + i];
3708 avctx = ist->dec_ctx;
/* Audio: exact — nb_samples at the decoder's sample rate. */
3711 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3712 AVRational sample_rate = {1, avctx->sample_rate};
3714 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: one frame at the forced or average frame rate, else 1 tick. */
3718 if (ist->framerate.num) {
3719 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3720 } else if (ist->st->avg_frame_rate.num) {
3721 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3722 } else duration = 1;
3724 if (!ifile->duration)
3725 ifile->time_base = ist->st->time_base;
3726 /* the total duration of the stream, max_pts - min_pts is
3727 * the duration of the stream without the last frame */
3728 duration += ist->max_pts - ist->min_pts;
/* Keep the maximum stream duration as the file duration. */
3729 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Decrement the remaining loop count when it is finite (> 0). */
3733 if (ifile->loop > 0)
/*
 * Read one packet from the input file with index file_index and process it
 * (demux -> timestamp fixup -> decode or streamcopy).  Return values per
 * the original doc-comment fragment below.
 * NOTE(review): many lines (braces, labels, some conditions) are elided in
 * this excerpt; comments cover only the visible code.
 */
3741 * - 0 -- one packet was read and processed
3742 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3743 * this function should be called again
3744 * - AVERROR_EOF -- this function should not be called again
3746 static int process_input(int file_index)
3748 InputFile *ifile = input_files[file_index];
3749 AVFormatContext *is;
/* Fetch the next packet, possibly via the input reader thread. */
3757 ret = get_input_packet(ifile, &pkt);
3759 if (ret == AVERROR(EAGAIN)) {
/* On EOF with -stream_loop active: seek back to the start and retry. */
3763 if (ret < 0 && ifile->loop) {
3764 if ((ret = seek_to_start(ifile, is)) < 0)
3766 ret = get_input_packet(ifile, &pkt);
3769 if (ret != AVERROR_EOF) {
3770 print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file... */
3775 for (i = 0; i < ifile->nb_streams; i++) {
3776 ist = input_streams[ifile->ist_index + i];
3777 if (ist->decoding_needed) {
3778 ret = process_input_packet(ist, NULL, 0);
3783 /* mark all outputs that don't go through lavfi as finished */
3784 for (j = 0; j < nb_output_streams; j++) {
3785 OutputStream *ost = output_streams[j];
3787 if (ost->source_index == ifile->ist_index + i &&
3788 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3789 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller moves on to other inputs. */
3793 ifile->eof_reached = 1;
3794 return AVERROR(EAGAIN);
/* Optional packet dump (-dump / -hex). */
3800 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3801 is->streams[pkt.stream_index]);
3803 /* the following test is needed in case new streams appear
3804 dynamically in stream : we ignore them */
3805 if (pkt.stream_index >= ifile->nb_streams) {
3806 report_new_stream(file_index, &pkt);
3807 goto discard_packet;
3810 ist = input_streams[ifile->ist_index + pkt.stream_index];
3812 ist->data_size += pkt.size;
/* Discarded streams (condition elided) skip all further processing. */
3816 goto discard_packet;
3818 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3819 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Demuxer-side timestamp trace (-debug_ts). */
3824 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3825 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3826 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3827 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3828 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3829 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3830 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3831 av_ts2str(input_files[ist->file_index]->ts_offset),
3832 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wraparound correction for streams with < 64 pts_wrap_bits. */
3835 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3836 int64_t stime, stime2;
3837 // Correcting starttime based on the enabled streams
3838 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3839 // so we instead do it here as part of discontinuity handling
3840 if ( ist->next_dts == AV_NOPTS_VALUE
3841 && ifile->ts_offset == -is->start_time
3842 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3843 int64_t new_start_time = INT64_MAX;
/* Recompute the start time from the streams that are actually enabled. */
3844 for (i=0; i<is->nb_streams; i++) {
3845 AVStream *st = is->streams[i];
3846 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3848 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3850 if (new_start_time > is->start_time) {
3851 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3852 ifile->ts_offset = -new_start_time;
3856 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3857 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3858 ist->wrap_correction_done = 1;
/* Undo one wrap period when dts/pts lies beyond the wrap midpoint. */
3860 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3861 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3862 ist->wrap_correction_done = 0;
3864 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3865 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3866 ist->wrap_correction_done = 0;
3870 /* add the stream-global side data to the first packet */
3871 if (ist->nb_packets == 1) {
3872 if (ist->st->nb_side_data)
3873 av_packet_split_side_data(&pkt);
3874 for (i = 0; i < ist->st->nb_side_data; i++) {
3875 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries, and the display matrix
 * when autorotation is handled by the filter chain. */
3878 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3880 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3883 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3887 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file ts offset and the per-stream -itsscale factor. */
3891 if (pkt.dts != AV_NOPTS_VALUE)
3892 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3893 if (pkt.pts != AV_NOPTS_VALUE)
3894 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3896 if (pkt.pts != AV_NOPTS_VALUE)
3897 pkt.pts *= ist->ts_scale;
3898 if (pkt.dts != AV_NOPTS_VALUE)
3899 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check against the file's last seen dts. */
3901 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3902 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3903 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3904 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3905 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3906 int64_t delta = pkt_dts - ifile->last_ts;
3907 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3908 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3909 ifile->ts_offset -= delta;
3910 av_log(NULL, AV_LOG_DEBUG,
3911 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3912 delta, ifile->ts_offset);
3913 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3914 if (pkt.pts != AV_NOPTS_VALUE)
3915 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping, shift timestamps by the accumulated file duration and
 * track min/max pts for the next loop's duration estimate. */
3919 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3920 if (pkt.pts != AV_NOPTS_VALUE) {
3921 pkt.pts += duration;
3922 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3923 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3926 if (pkt.dts != AV_NOPTS_VALUE)
3927 pkt.dts += duration;
/* Per-stream discontinuity check against the predicted next dts. */
3929 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3930 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3931 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3932 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3934 int64_t delta = pkt_dts - ist->next_dts;
/* DISCONT formats absorb the jump into ts_offset... */
3935 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3936 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3937 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3938 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3939 ifile->ts_offset -= delta;
3940 av_log(NULL, AV_LOG_DEBUG,
3941 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3942 delta, ifile->ts_offset);
3943 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3944 if (pkt.pts != AV_NOPTS_VALUE)
3945 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* ...other formats drop wildly out-of-range dts/pts instead. */
3948 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3949 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3950 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3951 pkt.dts = AV_NOPTS_VALUE;
3953 if (pkt.pts != AV_NOPTS_VALUE){
3954 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3955 delta = pkt_pts - ist->next_dts;
3956 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3957 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3958 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3959 pkt.pts = AV_NOPTS_VALUE;
/* Remember the file's last dts for the inter-stream check above. */
3965 if (pkt.dts != AV_NOPTS_VALUE)
3966 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Post-fixup timestamp trace (-debug_ts). */
3969 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3970 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3971 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3972 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3973 av_ts2str(input_files[ist->file_index]->ts_offset),
3974 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep subtitle overlays advancing, then hand the packet to the decode /
 * streamcopy path and release our reference. */
3977 sub2video_heartbeat(ist, pkt.pts);
3979 process_input_packet(ist, &pkt, 0);
3982 av_packet_unref(&pkt);
3988 * Perform a step of transcoding for the specified filter graph.
3990 * @param[in] graph filter graph to consider
3991 * @param[out] best_ist input stream where a frame would allow to continue
3992 * @return 0 for success, <0 for error
/* NOTE(review): braces and some statements (best_ist assignment, returns)
 * are elided in this excerpt. */
3994 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3997 int nb_requests, nb_requests_max = 0;
3998 InputFilter *ifilter;
/* Ask the graph to produce output on its oldest sink. */
4002 ret = avfilter_graph_request_oldest(graph->graph);
/* Success: pull whatever frames are now available from the sinks. */
4004 return reap_filters(0);
/* Graph is drained: flush the sinks and close all of its outputs. */
4006 if (ret == AVERROR_EOF) {
4007 ret = reap_filters(1);
4008 for (i = 0; i < graph->nb_outputs; i++)
4009 close_output_stream(graph->outputs[i]->ost);
4012 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc failed the most requests — that
 * is the stream whose next frame would best let the graph continue. */
4015 for (i = 0; i < graph->nb_inputs; i++) {
4016 ifilter = graph->inputs[i];
/* Ignore inputs that are stalled or already at EOF. */
4018 if (input_files[ist->file_index]->eagain ||
4019 input_files[ist->file_index]->eof_reached)
4021 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4022 if (nb_requests > nb_requests_max) {
4023 nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable. */
4029 for (i = 0; i < graph->nb_outputs; i++)
4030 graph->outputs[i]->ost->unavailable = 1;
4036 * Run a single step of transcoding.
4038 * @return 0 for success, <0 for error
/* NOTE(review): several lines (declarations, braces, early returns) are
 * elided in this excerpt. */
4040 static int transcode_step(void)
/* Pick the output stream that is furthest behind. */
4046 ost = choose_output();
4053 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered streams: let the filter graph decide which input to feed. */
4058 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Non-filtered streams read straight from their source input stream. */
4063 av_assert0(ost->source_index >= 0);
4064 ist = input_streams[ost->source_index];
4067 ret = process_input(ist->file_index);
/* Input temporarily dry: mark the stream unavailable and report success. */
4068 if (ret == AVERROR(EAGAIN)) {
4069 if (input_files[ist->file_index]->eagain)
4070 ost->unavailable = 1;
/* EOF of one input is not an error for the overall step. */
4075 return ret == AVERROR_EOF ? 0 : ret;
4077 return reap_filters(0);
4081 * The following code is the main loop of the file converter
/* Initialize, loop over transcode_step() until no output is needed or the
 * user/signals stop us, then flush decoders, write trailers, print the
 * final report and free per-stream resources.
 * NOTE(review): braces, goto labels and some statements are elided in this
 * excerpt; comments cover only the visible code. */
4083 static int transcode(void)
4086 AVFormatContext *os;
4089 int64_t timer_start;
4090 int64_t total_packets_written = 0;
4092 ret = transcode_init();
4096 if (stdin_interaction) {
4097 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n")
4100 timer_start = av_gettime_relative();
/* Spawn the per-input reader threads (no-op for a single input). */
4103 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until told to stop. */
4107 while (!received_sigterm) {
4108 int64_t cur_time= av_gettime_relative();
4110 /* if 'q' pressed, exits */
4111 if (stdin_interaction)
4112 if (check_keyboard_interaction(cur_time) < 0)
4115 /* check if there's any stream where output is still needed */
4116 if (!need_output()) {
4117 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4121 ret = transcode_step();
/* EOF/EAGAIN are expected flow control, anything else is fatal. */
4123 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4127 av_strerror(ret, errbuf, sizeof(errbuf));
4129 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4134 /* dump report by using the output first video and audio streams */
4135 print_report(0, timer_start, cur_time);
4138 free_input_threads();
4141 /* at the end of stream, we must flush the decoder buffers */
4142 for (i = 0; i < nb_input_streams; i++) {
4143 ist = input_streams[i];
4144 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4145 process_input_packet(ist, NULL, 0);
4152 /* write the trailer if needed and close file */
4153 for (i = 0; i < nb_output_files; i++) {
4154 os = output_files[i]->ctx;
4155 if ((ret = av_write_trailer(os)) < 0) {
4156 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4162 /* dump report by using the first video and audio streams */
4163 print_report(1, timer_start, av_gettime_relative());
4165 /* close each encoder */
4166 for (i = 0; i < nb_output_streams; i++) {
4167 ost = output_streams[i];
4168 if (ost->encoding_needed) {
4169 av_freep(&ost->enc_ctx->stats_in);
4171 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4174 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4175 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4179 /* close each decoder */
4180 for (i = 0; i < nb_input_streams; i++) {
4181 ist = input_streams[i];
4182 if (ist->decoding_needed) {
4183 avcodec_close(ist->dec_ctx);
4184 if (ist->hwaccel_uninit)
4185 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (also reached on error via elided goto): stop threads and
 * release per-output-stream allocations. */
4194 free_input_threads();
4197 if (output_streams) {
4198 for (i = 0; i < nb_output_streams; i++) {
4199 ost = output_streams[i];
/* Close the two-pass logfile if one was open. */
4202 fclose(ost->logfile);
4203 ost->logfile = NULL;
4205 av_freep(&ost->forced_kf_pts);
4206 av_freep(&ost->apad);
4207 av_freep(&ost->disposition);
4208 av_dict_free(&ost->encoder_opts);
4209 av_dict_free(&ost->sws_dict);
4210 av_dict_free(&ost->swr_opts);
4211 av_dict_free(&ost->resample_opts);
4212 av_dict_free(&ost->bsf_args);
/**
 * Return the user CPU time consumed by this process, in microseconds.
 * Falls back to wall-clock time on platforms without a per-process
 * accounting API.
 *
 * NOTE(review): the preprocessor guards and braces were elided in this
 * excerpt and are reconstructed here.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec * 1000000LL + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;

    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100ns units; divide by 10 for microseconds. */
    return ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/**
 * Return the peak memory usage of this process in bytes, or 0 when no
 * query API is available on this platform.
 *
 * NOTE(review): the preprocessor guards, braces and fallback branch were
 * elided in this excerpt and are reconstructed here.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;

    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/**
 * av_log callback that discards every message; installed when running with
 * -d (daemon mode) to silence logging entirely.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
    (void)ptr;
    (void)level;
    (void)fmt;
    (void)vl;
}
4260 int main(int argc, char **argv)
4265 register_exit(ffmpeg_cleanup);
4267 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4269 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4270 parse_loglevel(argc, argv, options);
4272 if(argc>1 && !strcmp(argv[1], "-d")){
4274 av_log_set_callback(log_callback_null);
4279 avcodec_register_all();
4281 avdevice_register_all();
4283 avfilter_register_all();
4285 avformat_network_init();
4287 show_banner(argc, argv, options);
4291 /* parse options and open all input/output files */
4292 ret = ffmpeg_parse_options(argc, argv);
4296 if (nb_output_files <= 0 && nb_input_files == 0) {
4298 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4302 /* file converter / grab */
4303 if (nb_output_files <= 0) {
4304 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4308 // if (nb_input_files == 0) {
4309 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4313 current_time = ti = getutime();
4314 if (transcode() < 0)
4316 ti = getutime() - ti;
4318 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4320 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4321 decode_error_stat[0], decode_error_stat[1]);
4322 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4325 exit_program(received_nb_signals ? 255 : main_return_code);
4326 return main_return_code;