2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
62 #include "libavutil/threadmessage.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* NOTE(review): this listing appears to be a sampled extraction of ffmpeg.c —
 * each line is prefixed with its original line number and many lines are
 * missing.  Comments below document only what the visible lines establish. */

/* Program identity used by the shared cmdutils banner/help code. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* Lazily opened log file for per-frame video statistics (-vstats). */
114 static FILE *vstats_file;
/* Names of the constants available in -force_key_frames expressions
 * (initializer truncated in this listing). */
116 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
/* Frame duplication / drop counters maintained by do_video_out(). */
129 static int run_as_daemon = 0;
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated on first use in
 * do_subtitle_out() and freed in ffmpeg_cleanup(). */
137 static uint8_t *subtitle_out;
/* Global stream/file tables shared with ffmpeg_opt.c / ffmpeg_filter.c
 * (non-static on purpose). */
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
149 FilterGraph **filtergraphs;
154 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty set. */
155 static struct termios oldtty;
156 static int restore_tty;
160 static void free_input_threads(void);
/* Header comment of the sub2video hack (wrapper lines missing from listing): */
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset the stream's sub2video canvas frame to a blank (all-zero) RGB32
 * buffer of the configured sub2video width/height.  Returns <0 on
 * allocation failure (error path truncated in this listing). */
168 static int sub2video_get_blank_frame(InputStream *ist)
171     AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before re-configuring the frame. */
173     av_frame_unref(frame);
174 ist->sub2video.frame->width = ist->sub2video.w;
175 ist->sub2video.frame->height = ist->sub2video.h;
176 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the new buffer, matching libavfilter expectations. */
177 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero-fill plane 0 so the canvas starts fully transparent. */
179 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas.
 * dst/dst_linesize describe the canvas; w/h are canvas dimensions used for
 * bounds checking; r (parameter line missing from listing) is the AVSubtitleRect. */
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186     uint32_t *pal, *dst2;
/* Only bitmap subtitles can be rendered this way; text forms are skipped. */
190     if (r->type != SUBTITLE_BITMAP) {
191         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles that would write outside the canvas. */
194     if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195         av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
199     dst += r->y * dst_linesize + r->x * 4;
200     src = r->pict.data[0];
/* data[1] holds the palette: one 32-bit ARGB entry per index. */
201     pal = (uint32_t *)r->pict.data[1];
202     for (y = 0; y < r->h; y++) {
203         dst2 = (uint32_t *)dst;
/* Expand each palette index to its 32-bit color. */
205         for (x = 0; x < r->w; x++)
206             *(dst2++) = pal[*(src2++)];
208         src += r->pict.linesize[0];
/* Push the current sub2video canvas frame, stamped with the given pts, into
 * every buffersrc filter fed by this input stream.  KEEP_REF lets the same
 * frame be re-sent later (heartbeats); PUSH requests immediate processing. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214     AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated (sub2video_get_blank_frame) first. */
217     av_assert1(frame->data[0]);
/* Remember the last pts sent so heartbeats don't go backwards. */
218     ist->sub2video.last_pts = frame->pts = pts;
219     for (i = 0; i < ist->nb_filters; i++)
220         av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221                                      AV_BUFFERSRC_FLAG_KEEP_REF |
222                                      AV_BUFFERSRC_FLAG_PUSH);
/* Re-render the sub2video canvas from a new AVSubtitle (or blank it when
 * sub == NULL, e.g. on heartbeat/flush) and push it into the filtergraph.
 * Converts the subtitle's display window from ms-based AV_TIME_BASE to the
 * input stream's time base. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227     int w = ist->sub2video.w, h = ist->sub2video.h;
228     AVFrame *frame = ist->sub2video.frame;
232     int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units). */
237         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
238                                  AV_TIME_BASE_Q, ist->st->time_base);
239         end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
240                                  AV_TIME_BASE_Q, ist->st->time_base);
241         num_rects = sub->num_rects;
/* NULL sub: keep the previous end pts and draw zero rectangles (clears). */
243         pts       = ist->sub2video.end_pts;
247     if (sub2video_get_blank_frame(ist) < 0) {
248         av_log(ist->dec_ctx, AV_LOG_ERROR,
249                "Impossible to get a blank canvas.\n");
252     dst          = frame->data    [0];
253     dst_linesize = frame->linesize[0];
/* Composite every rectangle onto the blank canvas, then publish it. */
254     for (i = 0; i < num_rects; i++)
255         sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
256     sub2video_push_ref(ist, pts);
257     ist->sub2video.end_pts = end_pts;
/* Called when any packet of file `ist` belongs to is read: re-send the last
 * sub2video frame on sibling subtitle streams so video filters waiting on a
 * subtitle input don't stall/accumulate frames. */
260 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
262     InputFile *infile = input_files[ist->file_index];
266     /* When a frame is read from a file, examine all sub2video streams in
267        the same file and send the sub2video frame again. Otherwise, decoded
268        video frames could be accumulating in the filter graph while a filter
269        (possibly overlay) is desperately waiting for a subtitle frame. */
270     for (i = 0; i < infile->nb_streams; i++) {
271         InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
272         if (!ist2->sub2video.frame)
274         /* subtitles seem to be usually muxed ahead of other streams;
275            if not, subtracting a larger time here is necessary */
276         pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
277         /* do not send the heartbeat frame if the subtitle is already ahead */
278         if (pts2 <= ist2->sub2video.last_pts)
/* Past the display window (or never drawn): refresh/clear the canvas. */
280         if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
281             sub2video_update(ist2, NULL);
/* Only push when some downstream filter actually failed a frame request. */
282         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
283             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
285             sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit one final clearing
 * frame if a subtitle was still on screen, then send EOF (NULL ref) to every
 * connected buffersrc. */
289 static void sub2video_flush(InputStream *ist)
293     if (ist->sub2video.end_pts < INT64_MAX)
294         sub2video_update(ist, NULL);
295     for (i = 0; i < ist->nb_filters; i++)
296         av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
299 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put the tty back into the mode
 * saved in oldtty (guard/condition lines missing from this listing). */
301 static void term_exit_sigsafe(void)
305     tcsetattr (0, TCSANOW, &oldtty);
/* Flush any buffered log output without printing anything new. */
311     av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between signal handlers and the main loop.
 * NOTE(review): `volatile int` predates the stricter `volatile sig_atomic_t`
 * convention for signal-handler-written flags — confirm against callers
 * before changing. */
315 static volatile int received_sigterm = 0;
316 static volatile int received_nb_signals = 0;
/* Set once option parsing / transcode setup finished; read by the
 * AVIO interrupt callback below. */
317 static volatile int transcode_init_done = 0;
/* Set on exit so the Windows console handler can stop stalling. */
318 static volatile int ffmpeg_exited = 0;
319 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many were received.
 * After more than three signals, hard-exit via an async-signal-safe write(2)
 * message (exit call truncated in this listing). */
322 sigterm_handler(int sig)
324     received_sigterm = sig;
325     received_nb_signals++;
327     if(received_nb_signals > 3) {
328         write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
329               strlen("Received > 3 system signals, hard exiting\n"));
335 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the program understands. */
336 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
338     av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
343     case CTRL_BREAK_EVENT:
344         sigterm_handler(SIGINT);
/* Close/logoff/shutdown map to SIGTERM, then we must stall (see below). */
347     case CTRL_CLOSE_EVENT:
348     case CTRL_LOGOFF_EVENT:
349     case CTRL_SHUTDOWN_EVENT:
350         sigterm_handler(SIGTERM);
351         /* Basically, with these 3 events, when we return from this method the
352            process is hard terminated, so stall as long as we need to
353            to try and let the main thread(s) clean up and gracefully terminate
354            (we have at most 5 seconds, but should be done far before that). */
355         while (!ffmpeg_exited) {
361         av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): body fragment of term_init() — the function signature and
 * several lines are missing from this listing.  Puts the controlling tty into
 * raw-ish mode for single-key reads and installs signal handlers. */
374     istty = isatty(0) && isatty(2);
376     if (istty && tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keys arrive unmodified. */
380         tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
381                         |INLCR|IGNCR|ICRNL|IXON);
382         tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
383         tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
384         tty.c_cflag &= ~(CSIZE|PARENB);
389         tcsetattr (0, TCSANOW, &tty);
391     signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
395     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
396     signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded is treated like a termination request. */
398     signal(SIGXCPU, sigterm_handler);
400 #if HAVE_SETCONSOLECTRLHANDLER
401     SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
405 /* read a key without blocking */
/* Non-blocking single-key read.  POSIX path polls stdin with select();
 * Windows path (HAVE_PEEKNAMEDPIPE) distinguishes console vs pipe input.
 * Several lines are missing from this listing. */
406 static int read_key(void)
/* Zero-timeout select on fd 0: returns immediately whether a key waits. */
418     n = select(1, &rfds, NULL, NULL, &tv);
427 # if HAVE_PEEKNAMEDPIPE
429     static HANDLE input_handle;
432         input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes; use that to detect redirected stdin. */
433         is_pipe = !GetConsoleMode(input_handle, &dw);
436         if (stdin->_cnt > 0) {
441         /* When running under a GUI, you will end here. */
442         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
443             // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once more signals have been
 * received than the init phase tolerates (during init, transcode_init_done
 * is 0, so the first signal already interrupts). */
461 static int decode_interrupt_cb(void *ctx)
463     return received_nb_signals > transcode_init_done;
/* Shared interrupt callback handed to every AVFormatContext. */
466 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered as the exit handler: frees filtergraphs,
 * output files/streams, input files/streams and all per-stream resources,
 * then reports how the run ended.  `ret` is the pending exit code.
 * NOTE(review): many guard/loop-closing lines are missing from this listing. */
468 static void ffmpeg_cleanup(int ret)
473         int maxrss = getmaxrss() / 1024;
474         av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then per-input/per-output names and structs --- */
477     for (i = 0; i < nb_filtergraphs; i++) {
478         FilterGraph *fg = filtergraphs[i];
479         avfilter_graph_free(&fg->graph);
480         for (j = 0; j < fg->nb_inputs; j++) {
481             av_freep(&fg->inputs[j]->name);
482             av_freep(&fg->inputs[j]);
484         av_freep(&fg->inputs);
485         for (j = 0; j < fg->nb_outputs; j++) {
486             av_freep(&fg->outputs[j]->name);
487             av_freep(&fg->outputs[j]);
489         av_freep(&fg->outputs);
490         av_freep(&fg->graph_desc);
492         av_freep(&filtergraphs[i]);
494     av_freep(&filtergraphs);
496     av_freep(&subtitle_out);
/* --- output files: close the muxer I/O (unless AVFMT_NOFILE) and context --- */
499     for (i = 0; i < nb_output_files; i++) {
500         OutputFile *of = output_files[i];
505         if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
507         avformat_free_context(s);
508         av_dict_free(&of->opts);
510         av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, cached frames, encoder ctx --- */
512     for (i = 0; i < nb_output_streams; i++) {
513         OutputStream *ost = output_streams[i];
514         AVBitStreamFilterContext *bsfc;
519         bsfc = ost->bitstream_filters;
/* Walk the linked list, closing each filter. */
521             AVBitStreamFilterContext *next = bsfc->next;
522             av_bitstream_filter_close(bsfc);
525         ost->bitstream_filters = NULL;
526         av_frame_free(&ost->filtered_frame);
527         av_frame_free(&ost->last_frame);
529         av_parser_close(ost->parser);
531         av_freep(&ost->forced_keyframes);
532         av_expr_free(ost->forced_keyframes_pexpr);
533         av_freep(&ost->avfilter);
534         av_freep(&ost->logfile_prefix);
536         av_freep(&ost->audio_channels_map);
537         ost->audio_channels_mapped = 0;
539         avcodec_free_context(&ost->enc_ctx);
541         av_freep(&output_streams[i]);
/* Input side: stop reader threads before closing their files. */
544     free_input_threads();
546     for (i = 0; i < nb_input_files; i++) {
547         avformat_close_input(&input_files[i]->ctx);
548         av_freep(&input_files[i]);
550     for (i = 0; i < nb_input_streams; i++) {
551         InputStream *ist = input_streams[i];
553         av_frame_free(&ist->decoded_frame);
554         av_frame_free(&ist->filter_frame);
555         av_dict_free(&ist->decoder_opts);
556         avsubtitle_free(&ist->prev_sub.subtitle);
557         av_frame_free(&ist->sub2video.frame);
558         av_freep(&ist->filters);
559         av_freep(&ist->hwaccel_device);
561         avcodec_free_context(&ist->dec_ctx);
563         av_freep(&input_streams[i]);
568     av_freep(&vstats_filename);
/* Finally release the top-level tables themselves. */
570     av_freep(&input_streams);
571     av_freep(&input_files);
572     av_freep(&output_streams);
573     av_freep(&output_files);
577     avformat_network_deinit();
/* Report why we are exiting: signal, failure after init, or normal end. */
579     if (received_sigterm) {
580         av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
581                (int) received_sigterm);
582     } else if (ret && transcode_init_done) {
583         av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-sensitive
 * match).  Used to strip already-consumed options before leftover checking. */
589 void remove_avoptions(AVDictionary **a, AVDictionary *b)
591     AVDictionaryEntry *t = NULL;
/* Empty key + IGNORE_SUFFIX iterates over every entry of b. */
593     while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
594         av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed — i.e. the user
 * passed an option no component recognized. */
598 void assert_avoptions(AVDictionary *m)
600     AVDictionaryEntry *t;
/* Any remaining entry means an unrecognized option. */
601     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
602         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper for experimental codecs selected without -strict experimental
 * (body missing from this listing; only the signature is visible). */
607 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user time consumed since the previous call,
 * labelled by the printf-style fmt.  Called with fmt == NULL to reset the
 * reference time without logging. */
612 static void update_benchmark(const char *fmt, ...)
614     if (do_benchmark_all) {
615         int64_t t = getutime();
621             vsnprintf(buf, sizeof(buf), fmt, va);
/* Delta against current_time, i.e. the previous checkpoint. */
623             av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
 * `others` flags for all remaining streams.  Flags are OR-ed in so existing
 * finished state is preserved. */
629 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
632     for (i = 0; i < nb_output_streams; i++) {
633         OutputStream *ost2 = output_streams[i];
634         ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded packet to the muxer: applies max-frames limiting for
 * non-video, runs the stream's bitstream-filter chain, sanitizes/monotonizes
 * DTS/PTS, then interleaved-writes.  Ordering of the timestamp fixups is
 * load-bearing — do not reorder.
 * NOTE(review): several lines are missing from this listing. */
638 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
640     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Stream-copy uses the muxer-side codec ctx; encoding uses enc_ctx. */
641     AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate encoder extradata to the muxer context if it was produced late. */
644     if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
645         ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
646         if (ost->st->codec->extradata) {
647             memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
648             ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* VSYNC_DROP / negative audio sync: discard timestamps entirely. */
652     if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
653         (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
654         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
657      * Audio encoders may split the packets -- #frames in != #packets out.
658      * But there is no reordering, so we can limit the number of output packets
659      * by simply dropping them here.
660      * Counting encoded video frames needs to be done separately because of
661      * reordering, see do_video_out()
663     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
664         if (ost->frame_number >= ost->max_frames) {
672     av_packet_split_side_data(pkt);
/* --- bitstream filter chain: each filter may replace the packet data --- */
675         AVPacket new_pkt = *pkt;
676         AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
679         int a = av_bitstream_filter_filter(bsfc, avctx,
680                                            bsf_arg ? bsf_arg->value : NULL,
681                                            &new_pkt.data, &new_pkt.size,
682                                            pkt->data, pkt->size,
683                                            pkt->flags & AV_PKT_FLAG_KEY);
/* Filter produced a new owned buffer: re-copy with required padding. */
684         if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
685             uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
687             memcpy(t, new_pkt.data, new_pkt.size);
688             memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
696             pkt->side_data = NULL;
697             pkt->side_data_elems = 0;
/* Wrap the filtered data in a refcounted buffer so the muxer can own it. */
699             new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
700                                            av_buffer_default_free, NULL, 0);
705             av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
706                    bsfc->filter->name, pkt->stream_index,
707                    avctx->codec ? avctx->codec->name : "copy");
/* --- timestamp sanitation (skipped for AVFMT_NOTIMESTAMPS muxers) --- */
717     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* DTS must never exceed PTS; replace an invalid DTS by the median of
 * pts, dts and last_mux_dts+1 (the min+max subtraction picks the middle). */
718         if (pkt->dts != AV_NOPTS_VALUE &&
719             pkt->pts != AV_NOPTS_VALUE &&
720             pkt->dts > pkt->pts) {
721             av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
723                    ost->file_index, ost->st->index);
725             pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726                      - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727                      - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing DTS. */
730         (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
731          pkt->dts != AV_NOPTS_VALUE &&
732          ost->last_mux_dts != AV_NOPTS_VALUE) {
733         int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734         if (pkt->dts < max) {
735             int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736             av_log(s, loglevel, "Non-monotonous DTS in output stream "
737                    "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738                    ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
740                 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
743             av_log(s, loglevel, "changing to %"PRId64". This may result "
744                    "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
746             if(pkt->pts >= pkt->dts)
747                 pkt->pts = FFMAX(pkt->pts, max);
752     ost->last_mux_dts = pkt->dts;
/* Byte/packet accounting for the final report. */
754     ost->data_size += pkt->size;
755     ost->packets_written++;
757     pkt->stream_index = ost->index;
760         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762                av_get_media_type_string(ost->enc_ctx->codec_type),
763                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
769     ret = av_interleaved_write_frame(s, pkt);
/* A mux failure finishes this stream's muxing and all streams' encoding. */
771         print_error("av_interleaved_write_frame()", ret);
772         main_return_code = 1;
773         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/* Mark this output stream's encoder finished and (condition line missing from
 * listing) shrink the owning file's recording time to the stream's end. */
778 static void close_output_stream(OutputStream *ost)
780     OutputFile *of = output_files[ost->file_index];
782     ost->finished |= ENCODER_FINISHED;
/* Stream end position converted from encoder time base to AV_TIME_BASE. */
784         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785         of->recording_time = FFMIN(of->recording_time, end);
/* Return whether this stream may still emit frames under the output file's
 * -t limit; closes the stream once the limit is reached.  (Return statements
 * missing from this listing.) */
789 static int check_recording_time(OutputStream *ost)
791     OutputFile *of = output_files[ost->file_index];
/* Compare elapsed stream time (encoder tb) against recording_time (AV_TIME_BASE). */
793     if (of->recording_time != INT64_MAX &&
794         av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
795                       AV_TIME_BASE_Q) >= 0) {
796         close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet(s) to write_frame().
 * Maintains ost->sync_opts as the running sample count used for -t checks.
 * NOTE(review): several lines are missing from this listing. */
802 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
805     AVCodecContext *enc = ost->enc_ctx;
809     av_init_packet(&pkt);
/* Stop encoding once the file's recording time is exhausted. */
813     if (!check_recording_time(ost))
/* Without usable pts (or with forced audio sync), stamp from sync_opts. */
816     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
817         frame->pts = ost->sync_opts;
818     ost->sync_opts = frame->pts + frame->nb_samples;
819     ost->samples_encoded += frame->nb_samples;
820     ost->frames_encoded++;
822     av_assert0(pkt.size || !pkt.data);
823     update_benchmark(NULL);
825         av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
826                "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
827                av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
828                enc->time_base.num, enc->time_base.den);
831     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
832         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
835     update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Convert packet timestamps from encoder tb to the muxer stream tb. */
838         av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
841             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
842                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
843                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
844                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
847         write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux it.  DVB subtitles are emitted twice (draw
 * packet + clear packet, nb == 2); other codecs once.
 * NOTE(review): several lines are missing from this listing. */
851 static void do_subtitle_out(AVFormatContext *s,
/* Fixed 1 MiB scratch buffer for the encoded subtitle payload. */
856     int subtitle_out_max_size = 1024 * 1024;
857     int subtitle_out_size, nb, i;
862     if (sub->pts == AV_NOPTS_VALUE) {
863         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared scratch buffer (freed in ffmpeg_cleanup). */
872         subtitle_out = av_malloc(subtitle_out_max_size);
874             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
879     /* Note: DVB subtitle need one packet to draw them and one other
880        packet to clear them */
881     /* XXX: signal it in the codec context ? */
882     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
887     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
889     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
890         pts -= output_files[ost->file_index]->start_time;
891     for (i = 0; i < nb; i++) {
/* The encoder may clobber num_rects (e.g. for the DVB clear packet);
 * save it so the caller's AVSubtitle stays intact. */
892         unsigned save_num_rects = sub->num_rects;
894         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
895         if (!check_recording_time(ost))
899         // start_display_time is required to be 0
/* Fold start_display_time (ms) into pts and zero it out. */
900         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
901         sub->end_display_time  -= sub->start_display_time;
902         sub->start_display_time = 0;
906         ost->frames_encoded++;
908         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
909                                                     subtitle_out_max_size, sub);
911             sub->num_rects = save_num_rects;
912         if (subtitle_out_size < 0) {
913             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
917         av_init_packet(&pkt);
918         pkt.data = subtitle_out;
919         pkt.size = subtitle_out_size;
920         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
921         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
922         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
923             /* XXX: the pts correction is handled here. Maybe handling
924                it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond. */
926                 pkt.pts += 90 * sub->start_display_time;
928                 pkt.pts += 90 * sub->end_display_time;
931         write_frame(s, &pkt, ost);
/* Core video output path: decides, per incoming filtered frame, how many
 * frames to emit (dup) or whether to drop, according to the active vsync
 * mode; then encodes (or raw-writes) each frame, handling forced keyframes
 * and two-pass stats.  Keeps ost->last_frame for duplication.
 * NOTE(review): many lines are missing from this sampled listing; the
 * comments mark the visible phases only. */
935 static void do_video_out(AVFormatContext *s,
937                          AVFrame *next_picture,
940     int ret, format_video_sync;
942     AVCodecContext *enc = ost->enc_ctx;
943     AVCodecContext *mux_enc = ost->st->codec;
944     int nb_frames, nb0_frames, i;
945     double delta, delta0;
948     InputStream *ist = NULL;
949     AVFilterContext *filter = ost->filter->filter;
951     if (ost->source_index >= 0)
952         ist = input_streams[ost->source_index];
/* --- phase 1: estimate this frame's duration (in encoder ticks) --- */
954     if (filter->inputs[0]->frame_rate.num > 0 &&
955         filter->inputs[0]->frame_rate.den > 0)
956         duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
958     if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
959         duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
961     if (!ost->filters_script &&
965         lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
966         duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF flush path: predict dup count from the median of recent history. */
971         nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
972                                           ost->last_nb0_frames[1],
973                                           ost->last_nb0_frames[2]);
/* delta0: gap between wanted and actual position; delta adds this frame. */
975     delta0 = sync_ipts - ost->sync_opts;
976     delta  = delta0 + duration;
978     /* by default, we output a single frame */
/* --- phase 2: resolve the effective vsync mode --- */
982     format_video_sync = video_sync_method;
983     if (format_video_sync == VSYNC_AUTO) {
984         if(!strcmp(s->oformat->name, "avi")) {
985             format_video_sync = VSYNC_VFR;
987             format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
989             && format_video_sync == VSYNC_CFR
990             && input_files[ist->file_index]->ctx->nb_streams == 1
991             && input_files[ist->file_index]->input_ts_offset == 0) {
992             format_video_sync = VSYNC_VSCFR;
994         if (format_video_sync == VSYNC_CFR && copy_ts) {
995             format_video_sync = VSYNC_VSCFR;
/* Correct small negative delta0 by clipping into the frame's duration. */
1001         format_video_sync != VSYNC_PASSTHROUGH &&
1002         format_video_sync != VSYNC_DROP) {
1003         double cor = FFMIN(-delta0, duration);
1004         if (delta0 < -0.6) {
1005             av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1007             av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
/* --- phase 3: per-mode dup/drop decision --- */
1013     switch (format_video_sync) {
1015         if (ost->frame_number == 0 && delta - duration >= 0.5) {
1016             av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1019             ost->sync_opts = lrint(sync_ipts);
1022         // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1023         if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1025         } else if (delta < -1.1)
1027         else if (delta > 1.1) {
1028             nb_frames = lrintf(delta);
1030                 nb0_frames = lrintf(delta0 - 0.6);
1036         else if (delta > 0.6)
1037             ost->sync_opts = lrint(sync_ipts);
1040     case VSYNC_PASSTHROUGH:
1041         ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and keep the dup history ring updated. */
1048     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1049     nb0_frames = FFMIN(nb0_frames, nb_frames);
1051     memmove(ost->last_nb0_frames + 1,
1052             ost->last_nb0_frames,
1053             sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1054     ost->last_nb0_frames[0] = nb0_frames;
/* Account for a drop decided on the previous call. */
1056     if (nb0_frames == 0 && ost->last_droped) {
1058         av_log(NULL, AV_LOG_VERBOSE,
1059                "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1060                ost->frame_number, ost->st->index, ost->last_frame->pts);
1062     if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1063         if (nb_frames > dts_error_threshold * 30) {
1064             av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1068         nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1069         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1071     ost->last_droped = nb_frames == nb0_frames && next_picture;
1073     /* duplicates frame if needed */
/* --- phase 4: emit nb_frames frames (first nb0_frames repeat last_frame) --- */
1074     for (i = 0; i < nb_frames; i++) {
1075         AVFrame *in_picture;
1076         av_init_packet(&pkt);
1080         if (i < nb0_frames && ost->last_frame) {
1081             in_picture = ost->last_frame;
1083             in_picture = next_picture;
1088         in_picture->pts = ost->sync_opts;
1091         if (!check_recording_time(ost))
1093         if (ost->frame_number >= ost->max_frames)
/* Raw-video shortcut: pass the AVPicture straight to the muxer, no encode. */
1097         if (s->oformat->flags & AVFMT_RAWPICTURE &&
1098             enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1099             /* raw pictures are written as AVPicture structure to
1100                avoid any copies. We support temporarily the older
1102             if (in_picture->interlaced_frame)
1103                 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1105                 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1106             pkt.data   = (uint8_t *)in_picture;
1107             pkt.size   =  sizeof(AVPicture);
1108             pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1109             pkt.flags |= AV_PKT_FLAG_KEY;
1111             write_frame(s, &pkt, ost);
/* Normal path: configure interlacing metadata then encode. */
1113             int got_packet, forced_keyframe = 0;
1116             if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1117                 ost->top_field_first >= 0)
1118                 in_picture->top_field_first = !!ost->top_field_first;
1120             if (in_picture->interlaced_frame) {
1121                 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1122                     mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1124                     mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1126                 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1128             in_picture->quality = enc->global_quality;
1129             in_picture->pict_type = 0;
/* --- forced keyframe decision: explicit pts list, expression, or "source" --- */
1131             pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1132                 in_picture->pts * av_q2d(enc->time_base) : NAN;
1133             if (ost->forced_kf_index < ost->forced_kf_count &&
1134                 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1135                 ost->forced_kf_index++;
1136                 forced_keyframe = 1;
1137             } else if (ost->forced_keyframes_pexpr) {
1139                 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1140                 res = av_expr_eval(ost->forced_keyframes_pexpr,
1141                                    ost->forced_keyframes_expr_const_values, NULL);
1142                 av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1143                         ost->forced_keyframes_expr_const_values[FKF_N],
1144                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1145                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1146                         ost->forced_keyframes_expr_const_values[FKF_T],
1147                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1150                     forced_keyframe = 1;
1151                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1152                         ost->forced_keyframes_expr_const_values[FKF_N];
1153                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1154                         ost->forced_keyframes_expr_const_values[FKF_T];
1155                     ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1158                 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1159             } else if (   ost->forced_keyframes
1160                        && !strncmp(ost->forced_keyframes, "source", 6)
1161                        && in_picture->key_frame==1) {
1162                 forced_keyframe = 1;
1165             if (forced_keyframe) {
1166                 in_picture->pict_type = AV_PICTURE_TYPE_I;
1167                 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1170             update_benchmark(NULL);
1172                 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1173                        "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1174                        av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1175                        enc->time_base.num, enc->time_base.den);
1178             ost->frames_encoded++;
1180             ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1181             update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1183                 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1189                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1190                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1191                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1192                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without CODEC_CAP_DELAY may legitimately omit pts; backfill. */
1195                 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1196                     pkt.pts = ost->sync_opts;
1198                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1201                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1202                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1203                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1204                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1207                 frame_size = pkt.size;
1208                 write_frame(s, &pkt, ost);
1210                 /* if two pass, output log */
1211                 if (ost->logfile && enc->stats_out) {
1212                     fprintf(ost->logfile, "%s", enc->stats_out);
1218      * For video, number of frames in == number of packets out.
1219      * But there may be reordering, so we can't throw away frames on encoder
1220      * flush, we need to limit them here, before they go into encoder.
1222     ost->frame_number++;
1224     if (vstats_filename && frame_size)
1225         do_video_stats(ost, frame_size);
/* Keep a reference to the frame just consumed for possible duplication. */
1228     if (!ost->last_frame)
1229         ost->last_frame = av_frame_alloc();
1230     av_frame_unref(ost->last_frame);
1231     if (next_picture && ost->last_frame)
1232         av_frame_ref(ost->last_frame, next_picture);
1234         av_frame_free(&ost->last_frame);
1237 static double psnr(double d)
1239 return -10.0 * log(d) / log(10.0);
/* Append one line per encoded video frame to the -vstats file: frame number,
 * quantizer, optional PSNR, sizes, timing and bitrates.
 * NOTE(review): error handling and some lines are missing from this listing. */
1242 static void do_video_stats(OutputStream *ost, int frame_size)
1244     AVCodecContext *enc;
1246     double ti1, bitrate, avg_bitrate;
1248     /* this is executed just the first time do_video_stats is called */
1250         vstats_file = fopen(vstats_filename, "w");
1258     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1259         frame_number = ost->st->nb_frames;
/* Quality is stored as lambda; convert back to a QP-like value. */
1260         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1261         if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
/* error[0] is the summed squared error of the luma plane. */
1262             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1264         fprintf(vstats_file,"f_size= %6d ", frame_size);
1265         /* compute pts value */
1266         ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder tick. */
1270         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1271         avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1272         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1273            (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1274         fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
/* Mark a stream fully finished (encoder + muxer); under a condition whose
 * line is missing from this listing, the loop extends that to every stream
 * of the owning output file. */
1278 static void finish_output_stream(OutputStream *ost)
1280     OutputFile *of = output_files[ost->file_index];
1283     ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1286         for (i = 0; i < of->ctx->nb_streams; i++)
1287             output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1292 * Get and encode new output from any of the filtergraphs, without causing
1295 * @return 0 for success, <0 for severe errors
1297 static int reap_filters(int flush)
1299 AVFrame *filtered_frame = NULL;
1302 /* Reap all buffers present in the buffer sinks */
1303 for (i = 0; i < nb_output_streams; i++) {
1304 OutputStream *ost = output_streams[i];
1305 OutputFile *of = output_files[ost->file_index];
1306 AVFilterContext *filter;
1307 AVCodecContext *enc = ost->enc_ctx;
1312 filter = ost->filter->filter;
/* Lazily allocate the per-stream frame used to receive filter output. */
1314 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1315 return AVERROR(ENOMEM);
1317 filtered_frame = ost->filtered_frame;
1320 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered in the sink, do not pull. */
1321 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1322 AV_BUFFERSINK_FLAG_NO_REQUEST);
1324 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1325 av_log(NULL, AV_LOG_WARNING,
1326 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1327 } else if (flush && ret == AVERROR_EOF) {
/* On EOF during a flush, push a NULL frame so do_video_out can drain. */
1328 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1329 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1333 if (ost->finished) {
1334 av_frame_unref(filtered_frame);
1337 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1338 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1339 AVRational tb = enc->time_base;
/* Scale the time base up by up to 16 extra bits so the double pts keeps
 * sub-timebase precision before dividing back down. */
1340 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1342 tb.den <<= extra_bits;
1344 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1345 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1346 float_pts /= 1 << extra_bits;
1347 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1348 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1350 filtered_frame->pts =
1351 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1352 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1354 //if (ost->source_index >= 0)
1355 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1357 switch (filter->inputs[0]->type) {
1358 case AVMEDIA_TYPE_VIDEO:
1359 if (!ost->frame_aspect_ratio.num)
1360 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1363 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1364 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1366 enc->time_base.num, enc->time_base.den);
1369 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1371 case AVMEDIA_TYPE_AUDIO:
/* Encoders that cannot handle mid-stream parameter changes require the
 * filtergraph output channel count to match the encoder configuration. */
1372 if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1373 enc->channels != av_frame_get_channels(filtered_frame)) {
1374 av_log(NULL, AV_LOG_ERROR,
1375 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1378 do_audio_out(of->ctx, ost, filtered_frame);
1381 // TODO support subtitle filters
1385 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: total bytes per media type, muxing overhead
 * percentage, then (at verbose level) per-stream demux/decode statistics for
 * every input file and mux/encode statistics for every output file.  Warns
 * when nothing at all was encoded. */
1392 static void print_final_stats(int64_t total_size)
1394 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1395 uint64_t subtitle_size = 0;
1396 uint64_t data_size = 0;
1397 float percent = -1.0;
/* Accumulate output payload sizes by codec type; extradata counts as
 * "global headers". */
1401 for (i = 0; i < nb_output_streams; i++) {
1402 OutputStream *ost = output_streams[i];
1403 switch (ost->enc_ctx->codec_type) {
1404 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1405 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1406 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1407 default: other_size += ost->data_size; break;
1409 extra_size += ost->enc_ctx->extradata_size;
1410 data_size += ost->data_size;
1411 if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1412 != CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw stream payload. */
1416 if (data_size && total_size>0 && total_size >= data_size)
1417 percent = 100.0 * (total_size - data_size) / data_size;
1419 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1420 video_size / 1024.0,
1421 audio_size / 1024.0,
1422 subtitle_size / 1024.0,
1423 other_size / 1024.0,
1424 extra_size / 1024.0);
1426 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1428 av_log(NULL, AV_LOG_INFO, "unknown");
1429 av_log(NULL, AV_LOG_INFO, "\n");
1431 /* print verbose per-stream stats */
1432 for (i = 0; i < nb_input_files; i++) {
1433 InputFile *f = input_files[i];
1434 uint64_t total_packets = 0, total_size = 0;
1436 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1437 i, f->ctx->filename);
1439 for (j = 0; j < f->nb_streams; j++) {
1440 InputStream *ist = input_streams[f->ist_index + j];
1441 enum AVMediaType type = ist->dec_ctx->codec_type;
1443 total_size += ist->data_size;
1444 total_packets += ist->nb_packets;
1446 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1447 i, j, media_type_string(type));
1448 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1449 ist->nb_packets, ist->data_size);
1451 if (ist->decoding_needed) {
1452 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1453 ist->frames_decoded);
1454 if (type == AVMEDIA_TYPE_AUDIO)
1455 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1456 av_log(NULL, AV_LOG_VERBOSE, "; ");
1459 av_log(NULL, AV_LOG_VERBOSE, "\n");
1462 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1463 total_packets, total_size);
1466 for (i = 0; i < nb_output_files; i++) {
1467 OutputFile *of = output_files[i];
1468 uint64_t total_packets = 0, total_size = 0;
1470 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1471 i, of->ctx->filename);
1473 for (j = 0; j < of->ctx->nb_streams; j++) {
1474 OutputStream *ost = output_streams[of->ost_index + j];
1475 enum AVMediaType type = ost->enc_ctx->codec_type;
1477 total_size += ost->data_size;
1478 total_packets += ost->packets_written;
1480 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1481 i, j, media_type_string(type));
1482 if (ost->encoding_needed) {
1483 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1484 ost->frames_encoded);
1485 if (type == AVMEDIA_TYPE_AUDIO)
1486 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1487 av_log(NULL, AV_LOG_VERBOSE, "; ");
1490 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1491 ost->packets_written, ost->data_size);
1493 av_log(NULL, AV_LOG_VERBOSE, "\n");
1496 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1497 total_packets, total_size);
/* If absolutely nothing was produced, warn the user and hint at the
 * trimming options that most commonly cause an empty output. */
1499 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1500 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1502 av_log(NULL, AV_LOG_WARNING, "\n");
1504 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line progress report ("frame= ... fps= ... time= ...
 * bitrate= ...") to stderr and, when -progress is active, a key=value block
 * to progress_avio.  Rate-limited to one update per 500ms unless this is the
 * final report; also prints the final stats on the last call.
 * NOTE(review): excerpt elides lines; several locals (buf, q, qp, bitrate,
 * hours/mins computation, etc.) are assigned on lines not visible here. */
1509 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1512 AVBPrint buf_script;
1514 AVFormatContext *oc;
1516 AVCodecContext *enc;
1517 int frame_number, vid, i;
1519 int64_t pts = INT64_MIN;
/* Static state: remember the last report time for rate limiting, and keep a
 * cumulative quantizer histogram across calls. */
1520 static int64_t last_time = -1;
1521 static int qp_histogram[52];
1522 int hours, mins, secs, us;
1524 if (!print_stats && !is_last_report && !progress_avio)
1527 if (!is_last_report) {
1528 if (last_time == -1) {
1529 last_time = cur_time;
/* Throttle: skip reports issued less than 500ms after the previous one. */
1532 if ((cur_time - last_time) < 500000)
1534 last_time = cur_time;
1538 oc = output_files[0]->ctx;
1540 total_size = avio_size(oc->pb);
1541 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1542 total_size = avio_tell(oc->pb);
1546 av_bprint_init(&buf_script, 0, 1);
1547 for (i = 0; i < nb_output_streams; i++) {
1549 ost = output_streams[i];
1551 if (!ost->stream_copy && enc->coded_frame)
1552 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1553 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1554 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1555 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1556 ost->file_index, ost->index, q);
/* First video stream encountered: print frame count, fps and quantizer. */
1558 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1559 float fps, t = (cur_time-timer_start) / 1000000.0;
1561 frame_number = ost->frame_number;
1562 fps = t > 1 ? frame_number / t : 0;
1563 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1564 frame_number, fps < 9.95, fps, q);
1565 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1566 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1567 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1568 ost->file_index, ost->index, q);
1570 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1574 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* Print the qp histogram as one log2-compressed hex digit per bucket. */
1576 for (j = 0; j < 32; j++)
1577 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1579 if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1581 double error, error_sum = 0;
1582 double scale, scale_sum = 0;
1584 char type[3] = { 'Y','U','V' };
1585 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1586 for (j = 0; j < 3; j++) {
/* Last report: use the cumulative error over all frames; otherwise the
 * per-frame error from the most recent coded frame. */
1587 if (is_last_report) {
1588 error = enc->error[j];
1589 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1591 error = enc->coded_frame->error[j];
1592 scale = enc->width * enc->height * 255.0 * 255.0;
1598 p = psnr(error / scale);
1599 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1600 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1601 ost->file_index, ost->index, type[j] | 32, p);
1603 p = psnr(error_sum / scale_sum);
1604 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1605 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1606 ost->file_index, ost->index, p);
1610 /* compute min output value */
1611 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1612 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1613 ost->st->time_base, AV_TIME_BASE_Q));
1615 nb_frames_drop += ost->last_droped;
1618 secs = FFABS(pts) / AV_TIME_BASE;
1619 us = FFABS(pts) % AV_TIME_BASE;
1625 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1627 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1629 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1630 "size=%8.0fkB time=", total_size / 1024.0);
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1633 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1634 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1635 (100 * us) / AV_TIME_BASE);
1638 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1639 av_bprintf(&buf_script, "bitrate=N/A\n");
1641 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1642 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1645 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1646 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1647 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1648 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1649 hours, mins, secs, us);
1651 if (nb_frames_dup || nb_frames_drop)
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1653 nb_frames_dup, nb_frames_drop);
1654 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1655 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* '\r' keeps the line in place for intermediate reports; '\n' ends it. */
1657 if (print_stats || is_last_report) {
1658 const char end = is_last_report ? '\n' : '\r';
1659 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1660 fprintf(stderr, "%s %c", buf, end);
1662 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1667 if (progress_avio) {
1668 av_bprintf(&buf_script, "progress=%s\n",
1669 is_last_report ? "end" : "continue");
1670 avio_write(progress_avio, buf_script.str,
1671 FFMIN(buf_script.len, buf_script.size - 1));
1672 avio_flush(progress_avio);
1673 av_bprint_finalize(&buf_script, NULL);
1674 if (is_last_report) {
1675 avio_closep(&progress_avio);
1680 print_final_stats(total_size);
/* Drain every active encoder at end of stream: repeatedly call the encoder
 * with a NULL frame until it stops producing packets, writing each packet to
 * the muxer.  Streams that are copy-only, raw-picture, or "audio with frame
 * size <= 1" are skipped (nothing buffered to flush).
 * NOTE(review): excerpt elides lines — loop structure, `desc`, `pkt`,
 * `got_packet` setup and the stop_encoding logic are partly invisible. */
1683 static void flush_encoders(void)
1687 for (i = 0; i < nb_output_streams; i++) {
1688 OutputStream *ost = output_streams[i];
1689 AVCodecContext *enc = ost->enc_ctx;
1690 AVFormatContext *os = output_files[ost->file_index]->ctx;
1691 int stop_encoding = 0;
1693 if (!ost->encoding_needed)
1696 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1698 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Select the flush entry point matching the stream's media type. */
1702 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1705 switch (enc->codec_type) {
1706 case AVMEDIA_TYPE_AUDIO:
1707 encode = avcodec_encode_audio2;
1710 case AVMEDIA_TYPE_VIDEO:
1711 encode = avcodec_encode_video2;
1722 av_init_packet(&pkt);
1726 update_benchmark(NULL);
/* NULL frame = flush request: encoder returns any delayed packets. */
1727 ret = encode(enc, &pkt, NULL, &got_packet);
1728 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1730 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1733 if (ost->logfile && enc->stats_out) {
1734 fprintf(ost->logfile, "%s", enc->stats_out);
1740 if (ost->finished & MUXER_FINISHED) {
1741 av_free_packet(&pkt);
1744 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1745 pkt_size = pkt.size;
1746 write_frame(os, &pkt, ost);
1747 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1748 do_video_stats(ost, pkt_size);
1759 * Check whether a packet from ist should be written into ost at this time
1761 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1763 OutputFile *of = output_files[ost->file_index];
/* Global index of ist across all input files, for comparison with the
 * output stream's configured source. */
1764 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets from a different source stream ... */
1766 if (ost->source_index != ist_index)
/* ... or packets before the output file's requested start time (-ss). */
1772 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start-time / recording-time trimming, rescales timestamps into the
 * output stream time base, runs the optional parser-level bitstream fixup,
 * and hands the packet to write_frame().
 * NOTE(review): excerpt elides lines; some control flow (returns/else
 * branches) between the visible statements is not shown. */
1778 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1780 OutputFile *of = output_files[ost->file_index];
1781 InputFile *f = input_files [ist->file_index];
1782 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1783 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1784 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1788 av_init_packet(&opkt);
/* By default the copy starts at the first keyframe (a decoder could not use
 * leading non-keyframes anyway), unless -copyinkf was given. */
1790 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1791 !ost->copy_initial_nonkeyframes)
1794 if (pkt->pts == AV_NOPTS_VALUE) {
1795 if (!ost->frame_number && ist->pts < start_time &&
1796 !ost->copy_prior_start)
1799 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1800 !ost->copy_prior_start)
/* Stop the stream once the output file's -t duration is reached. */
1804 if (of->recording_time != INT64_MAX &&
1805 ist->pts >= of->recording_time + start_time) {
1806 close_output_stream(ost);
1810 if (f->recording_time != INT64_MAX) {
1811 start_time = f->ctx->start_time;
1812 if (f->start_time != AV_NOPTS_VALUE)
1813 start_time += f->start_time;
1814 if (ist->pts >= f->recording_time + start_time) {
1815 close_output_stream(ost);
1820 /* force the input stream PTS */
1821 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts from the input to the output time base, shifted so the
 * output starts at zero. */
1824 if (pkt->pts != AV_NOPTS_VALUE)
1825 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1827 opkt.pts = AV_NOPTS_VALUE;
1829 if (pkt->dts == AV_NOPTS_VALUE)
1830 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1832 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1833 opkt.dts -= ost_tb_start_time;
/* Audio: derive timestamps from the sample count to avoid rounding drift
 * when the time bases do not divide evenly. */
1835 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1836 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1838 duration = ist->dec_ctx->frame_size;
1839 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1840 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1841 ost->st->time_base) - ost_tb_start_time;
1844 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1845 opkt.flags = pkt->flags;
1847 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1848 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1849 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1850 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1851 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1853 if (av_parser_change(ost->parser, ost->st->codec,
1854 &opkt.data, &opkt.size,
1855 pkt->data, pkt->size,
1856 pkt->flags & AV_PKT_FLAG_KEY)) {
/* av_parser_change allocated new data: wrap it so it gets freed. */
1857 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1862 opkt.data = pkt->data;
1863 opkt.size = pkt->size;
1865 av_copy_packet_side_data(&opkt, pkt);
1867 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1868 /* store AVPicture in AVPacket, as expected by the output format */
1869 avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1870 opkt.data = (uint8_t *)&pict;
1871 opkt.size = sizeof(AVPicture);
1872 opkt.flags |= AV_PKT_FLAG_KEY;
1875 write_frame(of->ctx, &opkt, ost);
/* If the decoder did not report a channel layout, pick the default layout
 * for the channel count (bounded by -guess_layout_max) and warn about the
 * guess.  Return value: established by elided lines — presumably non-zero on
 * success; confirm against full source. */
1878 int guess_input_channel_layout(InputStream *ist)
1880 AVCodecContext *dec = ist->dec_ctx;
1882 if (!dec->channel_layout) {
1883 char layout_name[256];
/* Refuse to guess for channel counts above the user-set limit. */
1885 if (dec->channels > ist->guess_layout_max)
1887 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1888 if (!dec->channel_layout)
1890 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1891 dec->channels, dec->channel_layout);
1892 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1893 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet, update timing/statistics, reconfigure filtergraphs
 * if the sample format/rate/channel layout changed mid-stream, and push the
 * decoded frame into every filter this input stream feeds.
 * Returns <0 on error, otherwise the decoder's return value.
 * NOTE(review): excerpt elides lines; some branches/returns between visible
 * statements are not shown. */
1898 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1900 AVFrame *decoded_frame, *f;
1901 AVCodecContext *avctx = ist->dec_ctx;
1902 int i, ret, err = 0, resample_changed;
1903 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames. */
1905 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1906 return AVERROR(ENOMEM);
1907 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1908 return AVERROR(ENOMEM);
1909 decoded_frame = ist->decoded_frame;
1911 update_benchmark(NULL);
1912 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1913 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1915 if (ret >= 0 && avctx->sample_rate <= 0) {
1916 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1917 ret = AVERROR_INVALIDDATA;
1920 if (*got_output || ret<0)
1921 decode_error_stat[ret<0] ++;
1923 if (ret < 0 && exit_on_error)
1926 if (!*got_output || ret < 0)
1929 ist->samples_decoded += decoded_frame->nb_samples;
1930 ist->frames_decoded++;
1933 /* increment next_dts to use for the case where the input stream does not
1934 have timestamps or there are multiple frames in the packet */
1935 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1937 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect mid-stream parameter changes that require filtergraph reinit. */
1941 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1942 ist->resample_channels != avctx->channels ||
1943 ist->resample_channel_layout != decoded_frame->channel_layout ||
1944 ist->resample_sample_rate != decoded_frame->sample_rate;
1945 if (resample_changed) {
1946 char layout1[64], layout2[64];
1948 if (!guess_input_channel_layout(ist)) {
1949 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1950 "layout for Input Stream #%d.%d\n", ist->file_index,
1954 decoded_frame->channel_layout = avctx->channel_layout;
1956 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1957 ist->resample_channel_layout);
1958 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1959 decoded_frame->channel_layout);
1961 av_log(NULL, AV_LOG_INFO,
1962 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1963 ist->file_index, ist->st->index,
1964 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1965 ist->resample_channels, layout1,
1966 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1967 avctx->channels, layout2);
1969 ist->resample_sample_fmt = decoded_frame->format;
1970 ist->resample_sample_rate = decoded_frame->sample_rate;
1971 ist->resample_channel_layout = decoded_frame->channel_layout;
1972 ist->resample_channels = avctx->channels;
1974 for (i = 0; i < nb_filtergraphs; i++)
1975 if (ist_in_filtergraph(filtergraphs[i], ist)) {
1976 FilterGraph *fg = filtergraphs[i];
1977 if (configure_filtergraph(fg) < 0) {
1978 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1984 /* if the decoder provides a pts, use it instead of the last packet pts.
1985 the decoder could be delaying output by a packet or more. */
1986 if (decoded_frame->pts != AV_NOPTS_VALUE) {
1987 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1988 decoded_frame_tb = avctx->time_base;
1989 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1990 decoded_frame->pts = decoded_frame->pkt_pts;
1991 decoded_frame_tb = ist->st->time_base;
1992 } else if (pkt->pts != AV_NOPTS_VALUE) {
1993 decoded_frame->pts = pkt->pts;
1994 decoded_frame_tb = ist->st->time_base;
1996 decoded_frame->pts = ist->dts;
1997 decoded_frame_tb = AV_TIME_BASE_Q;
/* Consume the packet pts so a second frame from the same packet does not
 * reuse it. */
1999 pkt->pts = AV_NOPTS_VALUE;
2000 if (decoded_frame->pts != AV_NOPTS_VALUE)
2001 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2002 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2003 (AVRational){1, avctx->sample_rate});
/* Feed every attached filter; all but the last get a ref, the last gets
 * the frame itself (elided lines presumably assign f = decoded_frame). */
2004 for (i = 0; i < ist->nb_filters; i++) {
2005 if (i < ist->nb_filters - 1) {
2006 f = ist->filter_frame;
2007 err = av_frame_ref(f, decoded_frame);
2012 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2013 AV_BUFFERSRC_FLAG_PUSH);
2014 if (err == AVERROR_EOF)
2015 err = 0; /* ignore */
2019 decoded_frame->pts = AV_NOPTS_VALUE;
2021 av_frame_unref(ist->filter_frame);
2022 av_frame_unref(decoded_frame);
2023 return err < 0 ? err : ret;
/* Decode one video packet, fix up timestamps (best-effort timestamp), handle
 * hwaccel frame retrieval, reconfigure filtergraphs on frame size/format
 * changes, and push the decoded frame into the stream's filters.
 * Returns <0 on error, otherwise the decoder's return value.
 * NOTE(review): excerpt elides lines; some branches between the visible
 * statements are not shown. */
2026 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2028 AVFrame *decoded_frame, *f;
2029 int i, ret = 0, err = 0, resample_changed;
2030 int64_t best_effort_timestamp;
2031 AVRational *frame_sample_aspect;
2033 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2034 return AVERROR(ENOMEM);
2035 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2036 return AVERROR(ENOMEM);
/* Give the decoder a dts in the stream time base for frame reordering. */
2037 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2040 update_benchmark(NULL);
2041 ret = avcodec_decode_video2(ist->dec_ctx,
2042 decoded_frame, got_output, pkt);
2043 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2045 // The following line may be required in some cases where there is no parser
2046 // or the parser does not has_b_frames correctly
2047 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2048 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2049 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2051 av_log_ask_for_sample(
2053 "has_b_frames is larger in decoder than demuxer %d > %d ",
2054 ist->dec_ctx->has_b_frames,
2055 ist->st->codec->has_b_frames
2059 if (*got_output || ret<0)
2060 decode_error_stat[ret<0] ++;
2062 if (ret < 0 && exit_on_error)
2065 if (*got_output && ret >= 0) {
2066 if (ist->dec_ctx->width != decoded_frame->width ||
2067 ist->dec_ctx->height != decoded_frame->height ||
2068 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2069 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2070 decoded_frame->width,
2071 decoded_frame->height,
2072 decoded_frame->format,
2073 ist->dec_ctx->width,
2074 ist->dec_ctx->height,
2075 ist->dec_ctx->pix_fmt);
2079 if (!*got_output || ret < 0)
2082 if(ist->top_field_first>=0)
2083 decoded_frame->top_field_first = ist->top_field_first;
2085 ist->frames_decoded++;
/* Download the frame from hardware memory when a hwaccel produced it. */
2087 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2088 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2092 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2094 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2095 if(best_effort_timestamp != AV_NOPTS_VALUE)
2096 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2099 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2100 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2101 ist->st->index, av_ts2str(decoded_frame->pts),
2102 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2103 best_effort_timestamp,
2104 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2105 decoded_frame->key_frame, decoded_frame->pict_type,
2106 ist->st->time_base.num, ist->st->time_base.den);
2111 if (ist->st->sample_aspect_ratio.num)
2112 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect mid-stream size / pixel-format changes requiring filter reinit. */
2114 resample_changed = ist->resample_width != decoded_frame->width ||
2115 ist->resample_height != decoded_frame->height ||
2116 ist->resample_pix_fmt != decoded_frame->format;
2117 if (resample_changed) {
2118 av_log(NULL, AV_LOG_INFO,
2119 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2120 ist->file_index, ist->st->index,
2121 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2122 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2124 ist->resample_width = decoded_frame->width;
2125 ist->resample_height = decoded_frame->height;
2126 ist->resample_pix_fmt = decoded_frame->format;
2128 for (i = 0; i < nb_filtergraphs; i++) {
2129 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2130 configure_filtergraph(filtergraphs[i]) < 0) {
2131 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2137 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2138 for (i = 0; i < ist->nb_filters; i++) {
/* Fall back to the stream-level aspect ratio if the frame carries none. */
2139 if (!frame_sample_aspect->num)
2140 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2142 if (i < ist->nb_filters - 1) {
2143 f = ist->filter_frame;
2144 err = av_frame_ref(f, decoded_frame);
2149 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2150 if (ret == AVERROR_EOF) {
2151 ret = 0; /* ignore */
2152 } else if (ret < 0) {
2153 av_log(NULL, AV_LOG_FATAL,
2154 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2160 av_frame_unref(ist->filter_frame);
2161 av_frame_unref(decoded_frame);
2162 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fixing overlapping display durations
 * (-fix_sub_duration, which delays output by one subtitle), update sub2video
 * state, and encode the subtitle into every matching output stream. */
2165 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2167 AVSubtitle subtitle;
2168 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2169 &subtitle, got_output, pkt);
2171 if (*got_output || ret<0)
2172 decode_error_stat[ret<0] ++;
2174 if (ret < 0 && exit_on_error)
2177 if (ret < 0 || !*got_output) {
2179 sub2video_flush(ist);
2183 if (ist->fix_sub_duration) {
/* Clamp the previous subtitle's display time so it ends when this one
 * starts; a non-positive duration drops it entirely. */
2185 if (ist->prev_sub.got_output) {
2186 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2187 1000, AV_TIME_BASE);
2188 if (end < ist->prev_sub.subtitle.end_display_time) {
2189 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2190 "Subtitle duration reduced from %d to %d%s\n",
2191 ist->prev_sub.subtitle.end_display_time, end,
2192 end <= 0 ? ", dropping it" : "");
2193 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: output lags one subtitle behind input. */
2196 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2197 FFSWAP(int, ret, ist->prev_sub.ret);
2198 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2206 sub2video_update(ist, &subtitle);
2208 if (!subtitle.num_rects)
2211 ist->frames_decoded++;
2213 for (i = 0; i < nb_output_streams; i++) {
2214 OutputStream *ost = output_streams[i];
2216 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2217 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2220 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2224 avsubtitle_free(&subtitle);
/* Signal EOF (a NULL frame/ref) to every buffer source attached to this
 * input stream, so the filtergraphs can flush. */
2228 static int send_filter_eof(InputStream *ist)
2231 for (i = 0; i < ist->nb_filters; i++) {
2233 ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2235 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2243 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2244 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2250 if (!ist->saw_first_ts) {
/* Initial dts: offset backwards by the decoder delay (B-frames) when the
 * stream has a known average frame rate. */
2251 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2253 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2254 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2255 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2257 ist->saw_first_ts = 1;
2260 if (ist->next_dts == AV_NOPTS_VALUE)
2261 ist->next_dts = ist->dts;
2262 if (ist->next_pts == AV_NOPTS_VALUE)
2263 ist->next_pts = ist->pts;
2267 av_init_packet(&avpkt);
2275 if (pkt->dts != AV_NOPTS_VALUE) {
2276 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2277 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2278 ist->next_pts = ist->pts = ist->dts;
2281 // while we have more to decode or while the decoder did output something on EOF
2282 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2286 ist->pts = ist->next_pts;
2287 ist->dts = ist->next_dts;
/* A decoder consuming only part of the packet means multiple frames per
 * packet; warn unless the codec declares SUBFRAMES capability. */
2289 if (avpkt.size && avpkt.size != pkt->size &&
2290 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2291 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2292 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2293 ist->showed_multi_packet_warning = 1;
2296 switch (ist->dec_ctx->codec_type) {
2297 case AVMEDIA_TYPE_AUDIO:
2298 ret = decode_audio (ist, &avpkt, &got_output);
2300 case AVMEDIA_TYPE_VIDEO:
2301 ret = decode_video (ist, &avpkt, &got_output);
/* Frame duration: packet duration if present, else derived from the
 * codec frame rate, repeat_pict and ticks_per_frame. */
2302 if (avpkt.duration) {
2303 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2304 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2305 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2306 duration = ((int64_t)AV_TIME_BASE *
2307 ist->dec_ctx->framerate.den * ticks) /
2308 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2312 if(ist->dts != AV_NOPTS_VALUE && duration) {
2313 ist->next_dts += duration;
2315 ist->next_dts = AV_NOPTS_VALUE;
2318 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2320 case AVMEDIA_TYPE_SUBTITLE:
2321 ret = transcode_subtitles(ist, &avpkt, &got_output);
2328 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2329 ist->file_index, ist->st->index, av_err2str(ret));
2336 avpkt.pts= AV_NOPTS_VALUE;
2338 // touch data and size only if not EOF
2340 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2348 if (got_output && !pkt)
2352 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2353 if (!pkt && ist->decoding_needed && !got_output) {
2354 int ret = send_filter_eof(ist);
2356 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2361 /* handle stream copy */
2362 if (!ist->decoding_needed) {
/* Without decoding, advance timestamps purely from packet metadata. */
2363 ist->dts = ist->next_dts;
2364 switch (ist->dec_ctx->codec_type) {
2365 case AVMEDIA_TYPE_AUDIO:
2366 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2367 ist->dec_ctx->sample_rate;
2369 case AVMEDIA_TYPE_VIDEO:
2370 if (ist->framerate.num) {
2371 // TODO: Remove work-around for c99-to-c89 issue 7
2372 AVRational time_base_q = AV_TIME_BASE_Q;
2373 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2374 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2375 } else if (pkt->duration) {
2376 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2377 } else if(ist->dec_ctx->framerate.num != 0) {
2378 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2379 ist->next_dts += ((int64_t)AV_TIME_BASE *
2380 ist->dec_ctx->framerate.den * ticks) /
2381 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2385 ist->pts = ist->dts;
2386 ist->next_pts = ist->next_dts;
/* Stream-copy the packet to every output stream configured for copy. */
2388 for (i = 0; pkt && i < nb_output_streams; i++) {
2389 OutputStream *ost = output_streams[i];
2391 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2394 do_streamcopy(ist, ost, pkt);
/*
 * Generate an SDP (Session Description Protocol) document covering every
 * RTP output file and either print it to stdout or write it to the file
 * named by the global sdp_filename.
 * NOTE(review): several original lines (loop close, error paths, frees)
 * are elided in this extract — confirm against the full source.
 */
2400 static void print_sdp(void)
2405 AVIOContext *sdp_pb;
2406 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts how many were found. */
2410 for (i = 0, j = 0; i < nb_output_files; i++) {
2411 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2412 avc[j] = output_files[i]->ctx;
/* Build the SDP text for the j collected RTP sessions into sdp[]. */
2417 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file given: dump the SDP to stdout. */
2419 if (!sdp_filename) {
2420 printf("SDP:\n%s\n", sdp);
/* Otherwise write it to the requested file via avio. */
2423 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2424 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2426 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2427 avio_closep(&sdp_pb);
/* The filename is consumed here so the SDP is only written once. */
2428 av_freep(&sdp_filename);
2435 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2438 for (i = 0; hwaccels[i].name; i++)
2439 if (hwaccels[i].pix_fmt == pix_fmt)
2440 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: pick the first pixel format from
 * the decoder's list that we can actually use, preferring a hardware
 * format whose hwaccel can be initialized for this input stream.
 * Returns AV_PIX_FMT_NONE when an explicitly requested hwaccel fails.
 * NOTE(review): the continue/guard lines between some statements are
 * elided in this extract — confirm control flow against the full source.
 */
2444 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2446 InputStream *ist = s->opaque;
2447 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2450 for (p = pix_fmts; *p != -1; p++) {
2451 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2452 const HWAccel *hwaccel;
/* Software formats are acceptable as-is; only HW formats need vetting. */
2454 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2457 hwaccel = get_hwaccel(*p);
/* Skip this format if another hwaccel is already active for the stream,
 * or the user asked for a specific hwaccel and this is not it. */
2459 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2460 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2463 ret = hwaccel->init(s);
/* Init failed: fatal only when the user explicitly requested this hwaccel. */
2465 if (ist->hwaccel_id == hwaccel->id) {
2466 av_log(NULL, AV_LOG_FATAL,
2467 "%s hwaccel requested for input stream #%d:%d, "
2468 "but cannot be initialized.\n", hwaccel->name,
2469 ist->file_index, ist->st->index);
2470 return AV_PIX_FMT_NONE;
/* Success: remember which hwaccel and pixel format are now active. */
2474 ist->active_hwaccel_id = hwaccel->id;
2475 ist->hwaccel_pix_fmt = *p;
2482 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2484 InputStream *ist = s->opaque;
2486 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2487 return ist->hwaccel_get_buffer(s, frame, flags);
2489 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream ist_index (when decoding is needed)
 * and reset its timestamp bookkeeping. On failure a human-readable
 * message is written into error[] and a negative AVERROR is returned.
 * NOTE(review): some guard/brace lines are elided in this extract.
 */
2492 static int init_input_stream(int ist_index, char *error, int error_len)
2495 InputStream *ist = input_streams[ist_index];
2497 if (ist->decoding_needed) {
2498 AVCodec *codec = ist->dec;
/* No decoder was found for this codec id — report and bail out. */
2500 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2501 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2502 return AVERROR(EINVAL);
/* Wire up our callbacks: opaque carries the InputStream back to
 * get_format()/get_buffer(); callbacks may run from decoder threads. */
2505 ist->dec_ctx->opaque = ist;
2506 ist->dec_ctx->get_format = get_format;
2507 ist->dec_ctx->get_buffer2 = get_buffer;
2508 ist->dec_ctx->thread_safe_callbacks = 1;
2510 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles destined for an output need end-display-time computed. */
2511 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2512 (ist->decoding_needed & DECODING_FOR_OST)) {
2513 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2514 if (ist->decoding_needed & DECODING_FOR_FILTER)
2515 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user set one. */
2518 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2519 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2520 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2521 if (ret == AVERROR_EXPERIMENTAL)
2522 abort_codec_experimental(codec, 0);
2524 snprintf(error, error_len,
2525 "Error while opening decoder for input stream "
2527 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are a user error — abort. */
2530 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2533 ist->next_pts = AV_NOPTS_VALUE;
2534 ist->next_dts = AV_NOPTS_VALUE;
2539 static InputStream *get_input_stream(OutputStream *ost)
2541 if (ost->source_index >= 0)
2542 return input_streams[ost->source_index];
/* qsort() comparator ordering int64_t values ascending. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t *lhs = a;
    const int64_t *rhs = b;

    if (*lhs < *rhs)
        return -1;
    if (*lhs > *rhs)
        return +1;
    return 0;
}
/*
 * Open the encoder for an output stream (when encoding is needed) and
 * propagate the resulting parameters into the muxer-visible AVStream.
 * On failure writes a message into error[] and returns a negative code.
 * NOTE(review): error-path braces/returns are elided in this extract.
 */
2552 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2556 if (ost->encoding_needed) {
2557 AVCodec *codec = ost->enc;
2558 AVCodecContext *dec = NULL;
2561 if ((ist = get_input_stream(ost)))
/* Carry the decoder's ASS subtitle header over to the encoder;
 * +1 byte because the ASS code expects NUL termination. */
2563 if (dec && dec->subtitle_header) {
2564 /* ASS code assumes this buffer is null terminated so add extra byte. */
2565 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2566 if (!ost->enc_ctx->subtitle_header)
2567 return AVERROR(ENOMEM);
2568 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2569 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread count unless the user set one. */
2571 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2572 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2573 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2575 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2576 if (ret == AVERROR_EXPERIMENTAL)
2577 abort_codec_experimental(codec, 1);
2578 snprintf(error, error_len,
2579 "Error while opening encoder for output stream #%d:%d - "
2580 "maybe incorrect parameters such as bit_rate, rate, width or height",
2581 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit
 * exactly frame_size samples per frame. */
2584 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2585 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
2586 av_buffersink_set_frame_size(ost->filter->filter,
2587 ost->enc_ctx->frame_size);
2588 assert_avoptions(ost->encoder_opts);
/* A bit_rate below 1000 almost certainly means the user passed kbit/s. */
2589 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2590 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2591 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the encoder context into the (deprecated) AVStream codec
 * context so the muxer sees the final parameters. */
2593 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2595 av_log(NULL, AV_LOG_FATAL,
2596 "Error initializing the output stream codec context.\n");
2600 // copy timebase while removing common factors
2601 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2602 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy branch: just apply user options and copy the timebase. */
2604 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2606 av_log(NULL, AV_LOG_FATAL,
2607 "Error setting up codec context options.\n");
2610 // copy timebase while removing common factors
2611 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * Parse the -force_key_frames specification string (comma-separated
 * times, optionally "chapters[+offset]") into a sorted array of
 * timestamps in avctx->time_base units, stored on the output stream.
 * NOTE(review): per-iteration advance/cleanup lines are elided here.
 */
2617 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2618 AVCodecContext *avctx)
2621 int n = 1, i, size, index = 0;
/* First pass: count commas to size the timestamp array. */
2624 for (p = kf; *p; p++)
2628 pts = av_malloc_array(size, sizeof(*pts));
2630 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated entry. */
2635 for (i = 0; i < n; i++) {
2636 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
2641 if (!memcmp(p, "chapters", 8)) {
2643 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter; guard against
 * integer overflow of size before reallocating. */
2646 if (avf->nb_chapters > INT_MAX - size ||
2647 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2649 av_log(NULL, AV_LOG_FATAL,
2650 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters" is added to every chapter time. */
2653 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2654 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2656 for (j = 0; j < avf->nb_chapters; j++) {
2657 AVChapter *c = avf->chapters[j];
2658 av_assert1(index < size);
2659 pts[index++] = av_rescale_q(c->start, c->time_base,
2660 avctx->time_base) + t;
/* Plain timestamp entry: parse and rescale into encoder timebase. */
2665 t = parse_time_or_die("force_key_frames", p, 1);
2666 av_assert1(index < size);
2667 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder loop can scan it linearly. */
2674 av_assert0(index == size);
2675 qsort(pts, size, sizeof(*pts), compare_int64);
2676 ost->forced_kf_count = size;
2677 ost->forced_kf_pts = pts;
/*
 * Warn (once per stream) when a packet arrives for a stream that
 * appeared after the initial probe — such streams are not mapped and
 * their data is ignored. nb_streams_warn tracks the highest index
 * already reported so each new stream is announced only once.
 */
2680 static void report_new_stream(int input_index, AVPacket *pkt)
2682 InputFile *file = input_files[input_index];
2683 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream index (early return elided here). */
2685 if (pkt->stream_index < file->nb_streams_warn)
2687 av_log(file->ctx, AV_LOG_WARNING,
2688 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2689 av_get_media_type_string(st->codec->codec_type),
2690 input_index, pkt->stream_index,
2691 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2692 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on an output stream to identify the
 * encoder used (full libavcodec ident, or just "Lavc <name>" in
 * bitexact mode). Does nothing if the user already provided the tag.
 */
2695 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2697 AVDictionaryEntry *e;
2699 uint8_t *encoder_string;
2700 int encoder_string_len;
2701 int format_flags = 0;
2702 int codec_flags = 0;
/* Respect an explicit user-supplied encoder tag. */
2704 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags to detect muxer-level bitexact mode. */
2707 e = av_dict_get(of->opts, "fflags", NULL, 0);
2709 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2712 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Evaluate -flags to detect codec-level bitexact mode. */
2714 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2716 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2719 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* "+2": one byte for the separating space, one for the NUL. */
2722 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2723 encoder_string = av_mallocz(encoder_string_len);
2724 if (!encoder_string)
/* In bitexact mode omit the version string so output is reproducible. */
2727 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2728 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2730 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2731 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
2732 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2733 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup for the whole transcode run: resolve filtergraph
 * sources, compute encoding/stream-copy parameters for every output
 * stream, open all encoders and decoders, write all output headers and
 * dump the stream mapping. Returns 0 on success, negative AVERROR on
 * failure. NOTE(review): many original lines (braces, continues, error
 * paths) are elided in this extract; statement order is significant —
 * confirm any change against the full source.
 */
2736 static int transcode_init(void)
2738 int ret = 0, i, j, k;
2739 AVFormatContext *oc;
2742 char error[1024] = {0};
/* Attach each complex-filtergraph output to its (single) input stream
 * so downstream code can treat it like a normally mapped stream. */
2745 for (i = 0; i < nb_filtergraphs; i++) {
2746 FilterGraph *fg = filtergraphs[i];
2747 for (j = 0; j < fg->nb_outputs; j++) {
2748 OutputFilter *ofilter = fg->outputs[j];
2749 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2751 if (fg->nb_inputs != 1)
2753 for (k = nb_input_streams-1; k >= 0 ; k--)
2754 if (fg->inputs[0]->ist == input_streams[k])
2756 ofilter->ost->source_index = k;
2760 /* init framerate emulation */
2761 for (i = 0; i < nb_input_files; i++) {
2762 InputFile *ifile = input_files[i];
2763 if (ifile->rate_emu)
2764 for (j = 0; j < ifile->nb_streams; j++)
2765 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2768 /* for each output stream, we compute the right encoding parameters */
2769 for (i = 0; i < nb_output_streams; i++) {
2770 AVCodecContext *enc_ctx;
2771 AVCodecContext *dec_ctx = NULL;
2772 ost = output_streams[i];
2773 oc = output_files[ost->file_index]->ctx;
2774 ist = get_input_stream(ost);
2776 if (ost->attachment_filename)
/* Stream copy writes parameters into the muxer-visible context
 * directly; encoding uses the separate encoder context. */
2779 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2782 dec_ctx = ist->dec_ctx;
2784 ost->st->disposition = ist->st->disposition;
2785 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2786 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output,
 * mark it as the default one. */
2788 for (j=0; j<oc->nb_streams; j++) {
2789 AVStream *st = oc->streams[j];
2790 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2793 if (j == oc->nb_streams)
2794 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2795 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2798 if (ost->stream_copy) {
2800 uint64_t extra_size;
2802 av_assert0(ist && !ost->filter);
2804 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2806 if (extra_size > INT_MAX) {
2807 return AVERROR(EINVAL);
2810 /* if stream_copy is selected, no need to decode or encode */
2811 enc_ctx->codec_id = dec_ctx->codec_id;
2812 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container accepts it. */
2814 if (!enc_ctx->codec_tag) {
2815 unsigned int codec_tag;
2816 if (!oc->oformat->codec_tag ||
2817 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2818 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2819 enc_ctx->codec_tag = dec_ctx->codec_tag;
2822 enc_ctx->bit_rate = dec_ctx->bit_rate;
2823 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2824 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2825 enc_ctx->field_order = dec_ctx->field_order;
2826 if (dec_ctx->extradata_size) {
2827 enc_ctx->extradata = av_mallocz(extra_size);
2828 if (!enc_ctx->extradata) {
2829 return AVERROR(ENOMEM);
2831 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2833 enc_ctx->extradata_size= dec_ctx->extradata_size;
2834 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2836 enc_ctx->time_base = ist->st->time_base;
2838 * Avi is a special case here because it supports variable fps but
2839 * having the fps and timebase differe significantly adds quite some
2842 if(!strcmp(oc->oformat->name, "avi")) {
2843 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2844 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2845 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2846 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2848 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2849 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2850 enc_ctx->ticks_per_frame = 2;
2851 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2852 && av_q2d(ist->st->time_base) < 1.0/500
2854 enc_ctx->time_base = dec_ctx->time_base;
2855 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2856 enc_ctx->time_base.den *= 2;
2857 enc_ctx->ticks_per_frame = 2;
/* Non-AVI containers without native VFR support (and not the MOV
 * family) may also inherit the decoder timebase. */
2859 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2860 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2861 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2862 && strcmp(oc->oformat->name, "f4v")
2864 if( copy_tb<0 && dec_ctx->time_base.den
2865 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2866 && av_q2d(ist->st->time_base) < 1.0/500
2868 enc_ctx->time_base = dec_ctx->time_base;
2869 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the decoder timebase when it looks
 * like a sane frame duration (num/den within ~1/121s..1s). */
2872 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2873 && dec_ctx->time_base.num < dec_ctx->time_base.den
2874 && dec_ctx->time_base.num > 0
2875 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2876 enc_ctx->time_base = dec_ctx->time_base;
2879 if (ist && !ost->frame_rate.num)
2880 ost->frame_rate = ist->framerate;
2881 if(ost->frame_rate.num)
2882 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2884 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2885 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy stream-level side data (skipping a display matrix the
 * user overrode with -metadata rotate / autorotate options). */
2887 if (ist->st->nb_side_data) {
2888 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2889 sizeof(*ist->st->side_data));
2890 if (!ost->st->side_data)
2891 return AVERROR(ENOMEM);
2893 ost->st->nb_side_data = 0;
2894 for (j = 0; j < ist->st->nb_side_data; j++) {
2895 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2896 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2898 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2901 sd_dst->data = av_malloc(sd_src->size);
2903 return AVERROR(ENOMEM);
2904 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2905 sd_dst->size = sd_src->size;
2906 sd_dst->type = sd_src->type;
2907 ost->st->nb_side_data++;
2911 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copy for stream copy. */
2913 switch (enc_ctx->codec_type) {
2914 case AVMEDIA_TYPE_AUDIO:
2915 if (audio_volume != 256) {
2916 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2919 enc_ctx->channel_layout = dec_ctx->channel_layout;
2920 enc_ctx->sample_rate = dec_ctx->sample_rate;
2921 enc_ctx->channels = dec_ctx->channels;
2922 enc_ctx->frame_size = dec_ctx->frame_size;
2923 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2924 enc_ctx->block_align = dec_ctx->block_align;
2925 enc_ctx->initial_padding = dec_ctx->delay;
2926 #if FF_API_AUDIOENC_DELAY
2927 enc_ctx->delay = dec_ctx->delay;
/* Bogus block_align values from some MP3/AC3 sources break muxers. */
2929 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2930 enc_ctx->block_align= 0;
2931 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2932 enc_ctx->block_align= 0;
2934 case AVMEDIA_TYPE_VIDEO:
2935 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2936 enc_ctx->width = dec_ctx->width;
2937 enc_ctx->height = dec_ctx->height;
2938 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2939 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2941 av_mul_q(ost->frame_aspect_ratio,
2942 (AVRational){ enc_ctx->height, enc_ctx->width });
2943 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2944 "with stream copy may produce invalid files\n");
2946 else if (ist->st->sample_aspect_ratio.num)
2947 sar = ist->st->sample_aspect_ratio;
2949 sar = dec_ctx->sample_aspect_ratio;
2950 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2951 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2952 ost->st->r_frame_rate = ist->st->r_frame_rate;
2954 case AVMEDIA_TYPE_SUBTITLE:
2955 enc_ctx->width = dec_ctx->width;
2956 enc_ctx->height = dec_ctx->height;
2958 case AVMEDIA_TYPE_UNKNOWN:
2959 case AVMEDIA_TYPE_DATA:
2960 case AVMEDIA_TYPE_ATTACHMENT:
/* Encoding branch: find and configure the encoder. */
2967 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2969 /* should only happen when a default codec is not present. */
2970 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2971 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2972 ret = AVERROR(EINVAL);
2976 set_encoder_id(output_files[ost->file_index], ost);
/* Audio/video encoders always get a (possibly trivial) filtergraph. */
2979 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2980 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2982 fg = init_simple_filtergraph(ist, ost);
2983 if (configure_filtergraph(fg)) {
2984 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: user option, filter graph, input
 * option, input stream, then a 25fps last resort. */
2989 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2990 if (!ost->frame_rate.num)
2991 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2992 if (ist && !ost->frame_rate.num)
2993 ost->frame_rate = ist->framerate;
2994 if (ist && !ost->frame_rate.num)
2995 ost->frame_rate = ist->st->r_frame_rate;
2996 if (ist && !ost->frame_rate.num) {
2997 ost->frame_rate = (AVRational){25, 1};
2998 av_log(NULL, AV_LOG_WARNING,
3000 "about the input framerate is available. Falling "
3001 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3002 "if you want a different framerate.\n",
3003 ost->file_index, ost->index);
3005 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3006 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3007 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3008 ost->frame_rate = ost->enc->supported_framerates[idx];
3010 // reduce frame rate for mpeg4 to be within the spec limits
3011 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3012 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3013 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Per-media-type encoder parameter setup from the filter output. */
3017 switch (enc_ctx->codec_type) {
3018 case AVMEDIA_TYPE_AUDIO:
3019 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3020 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3021 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3022 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3023 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3025 case AVMEDIA_TYPE_VIDEO:
3026 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3027 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3028 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3029 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3030 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3031 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3032 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE; rescale now
 * that the encoder timebase is known. */
3034 for (j = 0; j < ost->forced_kf_count; j++)
3035 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3037 enc_ctx->time_base);
3039 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3040 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3041 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3042 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3043 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3044 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3045 if (!strncmp(ost->enc->name, "libx264", 7) &&
3046 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3047 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3048 av_log(NULL, AV_LOG_WARNING,
3049 "No pixel format specified, %s for H.264 encoding chosen.\n"
3050 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3051 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3052 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3053 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3054 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3055 av_log(NULL, AV_LOG_WARNING,
3056 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3057 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3058 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3059 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3061 ost->st->avg_frame_rate = ost->frame_rate;
3064 enc_ctx->width   != dec_ctx->width  ||
3065 enc_ctx->height  != dec_ctx->height ||
3066 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3067 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames handling: "expr:" is evaluated per frame,
 * "source" copies input keyframes, anything else is a time list. */
3070 if (ost->forced_keyframes) {
3071 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3072 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3073 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3075 av_log(NULL, AV_LOG_ERROR,
3076 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3079 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3080 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3081 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3082 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3084 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3085 // parse it only for static kf timings
3086 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3087 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3091 case AVMEDIA_TYPE_SUBTITLE:
3092 enc_ctx->time_base = (AVRational){1, 1000};
3093 if (!enc_ctx->width) {
3094 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3095 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3098 case AVMEDIA_TYPE_DATA:
/* Apply the user's -disposition string via a local flags option table. */
3106 if (ost->disposition) {
3107 static const AVOption opts[] = {
3108 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3109 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3110 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3111 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3112 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3113 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3114 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3115 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3116 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3117 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3118 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3119 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3120 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3121 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3124 static const AVClass class = {
3126 .item_name = av_default_item_name,
3128 .version = LIBAVUTIL_VERSION_INT,
3130 const AVClass *pclass = &class;
3132 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3138 /* open each encoder */
3139 for (i = 0; i < nb_output_streams; i++) {
3140 ret = init_output_stream(output_streams[i], error, sizeof(error));
3145 /* init input streams */
3146 for (i = 0; i < nb_input_streams; i++)
3147 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* Roll back: close any encoders opened above before failing. */
3148 for (i = 0; i < nb_output_streams; i++) {
3149 ost = output_streams[i];
3150 avcodec_close(ost->enc_ctx);
3155 /* discard unused programs */
3156 for (i = 0; i < nb_input_files; i++) {
3157 InputFile *ifile = input_files[i];
3158 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3159 AVProgram *p = ifile->ctx->programs[j];
3160 int discard  = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually used. */
3162 for (k = 0; k < p->nb_stream_indexes; k++)
3163 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3164 discard = AVDISCARD_DEFAULT;
3167 p->discard = discard;
3171 /* open files and write file headers */
3172 for (i = 0; i < nb_output_files; i++) {
3173 oc = output_files[i]->ctx;
3174 oc->interrupt_callback = int_cb;
3175 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3176 snprintf(error, sizeof(error),
3177 "Could not write header for output file #%d "
3178 "(incorrect codec parameters ?): %s",
3179 i, av_err2str(ret));
3180 ret = AVERROR(EINVAL);
3183 // assert_avoptions(output_files[i]->opts);
3184 if (strcmp(oc->oformat->name, "rtp")) {
3190 /* dump the file output parameters - cannot be done before in case
3192 for (i = 0; i < nb_output_files; i++) {
3193 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3196 /* dump the stream mapping */
3197 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3198 for (i = 0; i < nb_input_streams; i++) {
3199 ist = input_streams[i];
3201 for (j = 0; j < ist->nb_filters; j++) {
3202 if (ist->filters[j]->graph->graph_desc) {
3203 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3204 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3205 ist->filters[j]->name);
3206 if (nb_filtergraphs > 1)
3207 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3208 av_log(NULL, AV_LOG_INFO, "\n");
3213 for (i = 0; i < nb_output_streams; i++) {
3214 ost = output_streams[i];
3216 if (ost->attachment_filename) {
3217 /* an attached file */
3218 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3219 ost->attachment_filename, ost->file_index, ost->index);
3223 if (ost->filter && ost->filter->graph->graph_desc) {
3224 /* output from a complex graph */
3225 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3226 if (nb_filtergraphs > 1)
3227 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3229 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3230 ost->index, ost->enc ? ost->enc->name : "?");
3234 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3235 input_streams[ost->source_index]->file_index,
3236 input_streams[ost->source_index]->st->index,
3239 if (ost->sync_ist != input_streams[ost->source_index])
3240 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3241 ost->sync_ist->file_index,
3242 ost->sync_ist->st->index);
3243 if (ost->stream_copy)
3244 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Transcoding: report decoder/encoder names, collapsing "native". */
3246 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3247 const AVCodec *out_codec   = ost->enc;
3248 const char *decoder_name   = "?";
3249 const char *in_codec_name  = "?";
3250 const char *encoder_name   = "?";
3251 const char *out_codec_name = "?";
3252 const AVCodecDescriptor *desc;
3255 decoder_name = in_codec->name;
3256 desc = avcodec_descriptor_get(in_codec->id);
3258 in_codec_name = desc->name;
3259 if (!strcmp(decoder_name, in_codec_name))
3260 decoder_name = "native";
3264 encoder_name = out_codec->name;
3265 desc = avcodec_descriptor_get(out_codec->id);
3267 out_codec_name = desc->name;
3268 if (!strcmp(encoder_name, out_codec_name))
3269 encoder_name = "native";
3272 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3273 in_codec_name, decoder_name,
3274 out_codec_name, encoder_name);
3276 av_log(NULL, AV_LOG_INFO, "\n");
3280 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3284 if (sdp_filename || want_sdp) {
3288 transcode_init_done = 1;
3293 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/*
 * A stream no longer wants output when it is finished, its file hit the
 * -fs size limit, or it reached -frames; hitting -frames also closes
 * every stream of that output file.
 * NOTE(review): the loop-continue and return lines are elided here.
 */
3294 static int need_output(void)
3298 for (i = 0; i < nb_output_streams; i++) {
3299 OutputStream *ost = output_streams[i];
3300 OutputFile *of = output_files[ost->file_index];
3301 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are done or whose file reached -fs limit. */
3303 if (ost->finished ||
3304 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close all streams of this output file. */
3306 if (ost->frame_number >= ost->max_frames) {
3308 for (j = 0; j < of->ctx->nb_streams; j++)
3309 close_output_stream(output_streams[of->ost_index + j]);
3320 * Select the output stream to process.
3322 * @return selected output stream, or NULL if none available
/*
 * Picks the unfinished output stream with the smallest muxed DTS so
 * streams are interleaved fairly; an "unavailable" stream yields NULL
 * (caller retries later) rather than being skipped.
 */
3324 static OutputStream *choose_output(void)
3327 int64_t opts_min = INT64_MAX;
3328 OutputStream *ost_min = NULL;
3330 for (i = 0; i < nb_output_streams; i++) {
3331 OutputStream *ost = output_streams[i];
/* Compare positions in a common timebase (rescaled from stream tb). */
3332 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3334 if (!ost->finished && opts < opts_min) {
3336 ost_min = ost->unavailable ? NULL : ost;
/*
 * Poll for interactive keyboard commands (at most every 100ms, never in
 * daemon mode): verbosity, QP histogram, packet dumping, debug flags,
 * sending commands to filters, quit. Returns AVERROR_EXIT to stop the
 * transcode loop, 0 otherwise.
 * NOTE(review): several branch/close lines are elided in this extract.
 */
3342 static int check_keyboard_interaction(int64_t cur_time)
3345 static int64_t last_time;
3346 if (received_nb_signals)
3347 return AVERROR_EXIT;
3348 /* read_key() returns 0 on EOF */
3349 if(cur_time - last_time >= 100000 && !run_as_daemon){
3351 last_time = cur_time;
3355 return AVERROR_EXIT;
/* +/- adjust log level; 's' toggles QP histogram display. */
3356 if (key == '+') av_log_set_level(av_log_get_level()+10);
3357 if (key == '-') av_log_set_level(av_log_get_level()-10);
3358 if (key == 's') qp_hist     ^= 1;
/* 'h' cycles: off -> pkt dump -> pkt+payload hex dump -> off. */
3361 do_hex_dump = do_pkt_dump = 0;
3362 } else if(do_pkt_dump){
3366 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a command line from the tty and dispatch it to
 * matching filters ('c' = first match only, 'C' = all/queued). */
3368 if (key == 'c' || key == 'C'){
3369 char buf[4096], target[64], command[256], arg[256] = {0};
3372 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3374 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3379 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3380 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3381 target, time, command, arg);
3382 for (i = 0; i < nb_filtergraphs; i++) {
3383 FilterGraph *fg = filtergraphs[i];
3386 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3387 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3388 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3389 } else if (key == 'c') {
3390 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3391 ret = AVERROR_PATCHWELCOME;
3393 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3395 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3400 av_log(NULL, AV_LOG_ERROR,
3401 "Parse error, at least 3 arguments were expected, "
3402 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through codec debug flag values; 'D' prompts for one. */
3405 if (key == 'd' || key == 'D'){
3408 debug = input_streams[0]->st->codec->debug<<1;
3409 if(!debug) debug = 1;
3410 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3413 if(scanf("%d", &debug)!=1)
3414 fprintf(stderr,"error parsing debug value\n");
/* Apply the new debug flags to every decoder and encoder context. */
3415 for(i=0;i<nb_input_streams;i++) {
3416 input_streams[i]->st->codec->debug = debug;
3418 for(i=0;i<nb_output_streams;i++) {
3419 OutputStream *ost = output_streams[i];
3420 ost->enc_ctx->debug = debug;
3422 if(debug) av_log_set_level(AV_LOG_DEBUG);
3423 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key): print the interactive help text. */
3426 fprintf(stderr, "key    function\n"
3427 "?      show this help\n"
3428 "+      increase verbosity\n"
3429 "-      decrease verbosity\n"
3430 "c      Send command to first matching filter supporting it\n"
3431 "C      Send/Que command to all matching filters\n"
3432 "D      cycle through available debug modes\n"
3433 "h      dump packets/hex press to cycle through the 3 states\n"
3435 "s      Show QP histogram\n"
/*
 * Per-input-file demuxer thread: reads packets from the input's
 * AVFormatContext and forwards them to the main thread through the
 * file's thread message queue.
 *
 * arg is the InputFile* for this thread (assigned to f in an elided
 * line). On demuxer error/EOF the error is recorded on the queue's
 * receive side so the main thread sees it.
 */
3442 static void *input_thread(void *arg)
/* in non-blocking mode a full queue yields EAGAIN instead of blocking */
3445 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3450 ret = av_read_frame(f->ctx, &pkt);
3452 if (ret == AVERROR(EAGAIN)) {
/* propagate the demux error/EOF to the consumer side of the queue */
3457 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* make the packet's data independently owned before crossing threads */
3460 av_dup_packet(&pkt);
3461 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: warn once, then retry blocking
 * (the flags reset is elided between these lines) */
3462 if (flags && ret == AVERROR(EAGAIN)) {
3464 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3465 av_log(f->ctx, AV_LOG_WARNING,
3466 "Thread message queue blocking; consider raising the "
3467 "thread_queue_size option (current value: %d)\n",
3468 f->thread_queue_size);
/* a failed send (other than normal EOF) is reported and the packet freed */
3471 if (ret != AVERROR_EOF)
3472 av_log(f->ctx, AV_LOG_ERROR,
3473 "Unable to send packet to main thread: %s\n",
3475 av_free_packet(&pkt);
3476 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Tear down all demuxer input threads: signal EOF on each queue's send
 * side, drain and free any packets still queued, join the thread and
 * free the queue itself. Safe to call when no threads were started
 * (each file is skipped unless it has an in_thread_queue).
 */
3484 static void free_input_threads(void)
3488 for (i = 0; i < nb_input_files; i++) {
3489 InputFile *f = input_files[i];
/* skip files for which no input thread / queue was created */
3492 if (!f || !f->in_thread_queue)
/* wake the producer: further sends will fail with EOF */
3494 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so their data is not leaked */
3495 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3496 av_free_packet(&pkt);
3498 pthread_join(f->thread, NULL);
3500 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one demuxer thread per input file (only used when there is more
 * than one input file; with a single input, demuxing stays on the main
 * thread). Returns 0 on success or a negative AVERROR code.
 */
3504 static int init_input_threads(void)
/* single input file: no threading needed, early return (elided) */
3508 if (nb_input_files == 1)
3511 for (i = 0; i < nb_input_files; i++) {
3512 InputFile *f = input_files[i];
/* non-seekable (or non-lavfi) inputs must not block on a full queue,
 * otherwise a stalled consumer could deadlock live sources */
3514 if (f->ctx->pb ? !f->ctx->pb->seekable :
3515 strcmp(f->ctx->iformat->name, "lavfi"))
3516 f->non_blocking = 1;
3517 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3518 f->thread_queue_size, sizeof(AVPacket));
3522 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns an errno-style code, not -1/errno */
3523 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3524 av_thread_message_queue_free(&f->in_thread_queue);
3525 return AVERROR(ret);
/*
 * Fetch the next packet for file f from its demuxer thread's message
 * queue; non-blocking when the file is in non-blocking mode (the
 * condition selecting the flag is elided between these lines).
 */
3531 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3533 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3535 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Get the next demuxed packet for input file f.
 *
 * When reading is rate-limited (-re style; the guarding condition is
 * elided), returns AVERROR(EAGAIN) while any stream of the file is
 * ahead of wall-clock time. With multiple input files the packet comes
 * from the demuxer thread's queue, otherwise straight from av_read_frame().
 */
3539 static int get_input_packet(InputFile *f, AVPacket *pkt)
3543 for (i = 0; i < f->nb_streams; i++) {
3544 InputStream *ist = input_streams[f->ist_index + i];
/* stream position in microseconds vs. elapsed real time */
3545 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3546 int64_t now = av_gettime_relative() - ist->start;
3548 return AVERROR(EAGAIN);
3553 if (nb_input_files > 1)
3554 return get_input_packet_mt(f, pkt);
3556 return av_read_frame(f->ctx, pkt);
/*
 * Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input produced EAGAIN), 0 otherwise (return statements are
 * elided around these lines).
 */
3559 static int got_eagain(void)
3562 for (i = 0; i < nb_output_streams; i++)
3563 if (output_streams[i]->unavailable)
/*
 * Clear all EAGAIN bookkeeping: every input file's eagain flag and every
 * output stream's unavailable flag, so the next transcode iteration
 * retries all of them.
 */
3568 static void reset_eagain(void)
3571 for (i = 0; i < nb_input_files; i++)
3572 input_files[i]->eagain = 0;
3573 for (i = 0; i < nb_output_streams; i++)
3574 output_streams[i]->unavailable = 0;
/*
 * Read one packet from the input file with index file_index and process
 * it: timestamp wrap correction, global side-data propagation, ts_offset
 * and ts_scale application, discontinuity handling, then decoding or
 * stream-copying via process_input_packet().
 *
 * Return values:
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 *
 * NOTE(review): elided extraction -- several declarations, braces and
 * statements between the numbered lines are not visible here.
 */
3584 static int process_input(int file_index)
3586 InputFile *ifile = input_files[file_index];
3587 AVFormatContext *is;
3593 ret = get_input_packet(ifile, &pkt);
3595 if (ret == AVERROR(EAGAIN)) {
/* real demux error (not plain EOF): report it */
3600 if (ret != AVERROR_EOF) {
3601 print_error(is->filename, ret);
/* EOF on this file: flush every decoder that needs it ... */
3606 for (i = 0; i < ifile->nb_streams; i++) {
3607 ist = input_streams[ifile->ist_index + i];
3608 if (ist->decoding_needed) {
3609 ret = process_input_packet(ist, NULL);
3614 /* mark all outputs that don't go through lavfi as finished */
3615 for (j = 0; j < nb_output_streams; j++) {
3616 OutputStream *ost = output_streams[j];
3618 if (ost->source_index == ifile->ist_index + i &&
3619 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3620 finish_output_stream(ost);
/* ... then remember EOF and let the caller retry other inputs */
3624 ifile->eof_reached = 1;
3625 return AVERROR(EAGAIN);
/* optional packet dump for debugging (-dump / -hex) */
3631 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3632 is->streams[pkt.stream_index]);
3634 /* the following test is needed in case new streams appear
3635 dynamically in stream : we ignore them */
3636 if (pkt.stream_index >= ifile->nb_streams) {
3637 report_new_stream(file_index, &pkt);
3638 goto discard_packet;
3641 ist = input_streams[ifile->ist_index + pkt.stream_index];
/* byte counter for the final statistics report */
3643 ist->data_size += pkt.size;
3647 goto discard_packet;
/* verbose demuxer-side timestamp trace (guard condition elided) */
3650 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3651 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3652 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3653 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3654 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3655 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3656 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3657 av_ts2str(input_files[ist->file_index]->ts_offset),
3658 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp-wrap correction for formats with limited pts_wrap_bits */
3661 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3662 int64_t stime, stime2;
3663 // Correcting starttime based on the enabled streams
3664 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3665 // so we instead do it here as part of discontinuity handling
3666 if ( ist->next_dts == AV_NOPTS_VALUE
3667 && ifile->ts_offset == -is->start_time
3668 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3669 int64_t new_start_time = INT64_MAX;
/* recompute start time from the earliest non-discarded stream */
3670 for (i=0; i<is->nb_streams; i++) {
3671 AVStream *st = is->streams[i];
3672 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3674 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3676 if (new_start_time > is->start_time) {
3677 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3678 ifile->ts_offset = -new_start_time;
/* stime..stime2 spans one full wrap period in stream time base */
3682 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3683 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3684 ist->wrap_correction_done = 1;
/* timestamps in the upper half of the wrap range are unwrapped; if a
 * correction was applied, keep checking subsequent packets */
3686 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3687 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3688 ist->wrap_correction_done = 0;
3690 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3691 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3692 ist->wrap_correction_done = 0;
3696 /* add the stream-global side data to the first packet */
3697 if (ist->nb_packets == 1) {
3698 if (ist->st->nb_side_data)
3699 av_packet_split_side_data(&pkt);
3700 for (i = 0; i < ist->st->nb_side_data; i++) {
3701 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* don't duplicate side data the packet already carries */
3704 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* display matrix is handled by autorotate instead of being copied */
3706 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3709 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3713 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the per-stream ts scale */
3717 if (pkt.dts != AV_NOPTS_VALUE)
3718 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3719 if (pkt.pts != AV_NOPTS_VALUE)
3720 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3722 if (pkt.pts != AV_NOPTS_VALUE)
3723 pkt.pts *= ist->ts_scale;
3724 if (pkt.dts != AV_NOPTS_VALUE)
3725 pkt.dts *= ist->ts_scale;
/* first packet of this stream after another file set last_ts:
 * compensate inter-file timestamp discontinuities */
3727 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3728 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3729 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3730 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3731 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3732 int64_t delta = pkt_dts - ifile->last_ts;
3733 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3734 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3735 ifile->ts_offset -= delta;
3736 av_log(NULL, AV_LOG_DEBUG,
3737 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3738 delta, ifile->ts_offset);
3739 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3740 if (pkt.pts != AV_NOPTS_VALUE)
3741 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* intra-stream discontinuity handling: against the predicted next_dts */
3745 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3746 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3747 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3749 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3750 int64_t delta = pkt_dts - ist->next_dts;
3751 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* discontinuous formats: shift the whole file's ts_offset */
3752 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3753 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3754 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3755 ifile->ts_offset -= delta;
3756 av_log(NULL, AV_LOG_DEBUG,
3757 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3758 delta, ifile->ts_offset);
3759 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3760 if (pkt.pts != AV_NOPTS_VALUE)
3761 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* continuous formats: out-of-range timestamps are dropped instead */
3764 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3765 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3766 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3767 pkt.dts = AV_NOPTS_VALUE;
3769 if (pkt.pts != AV_NOPTS_VALUE){
3770 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3771 delta = pkt_pts - ist->next_dts;
3772 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3773 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3774 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3775 pkt.pts = AV_NOPTS_VALUE;
/* remember the last seen dts for inter-file discontinuity detection */
3781 if (pkt.dts != AV_NOPTS_VALUE)
3782 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* post-correction timestamp trace (guard condition elided) */
3785 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3786 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3787 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3788 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3789 av_ts2str(input_files[ist->file_index]->ts_offset),
3790 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video timing in sync, then hand the packet to decode/copy */
3793 sub2video_heartbeat(ist, pkt.pts);
3795 process_input_packet(ist, &pkt);
3798 av_free_packet(&pkt);
/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph    filter graph to consider
 * @param[out] best_ist input stream where a frame would allow to continue
 *                      (the assignment to *best_ist is elided here)
 * @return 0 for success, <0 for error
 */
3810 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3813 int nb_requests, nb_requests_max = 0;
3814 InputFilter *ifilter;
/* let the graph pull from its oldest input; success means output frames
 * may be available, so reap them */
3818 ret = avfilter_graph_request_oldest(graph->graph);
3820 return reap_filters(0);
3822 if (ret == AVERROR_EOF) {
/* graph is finished: flush remaining frames and close its outputs */
3823 ret = reap_filters(1);
3824 for (i = 0; i < graph->nb_outputs; i++)
3825 close_output_stream(graph->outputs[i]->ost);
3828 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc had the most failed requests
 * as the best candidate to feed next */
3831 for (i = 0; i < graph->nb_inputs; i++) {
3832 ifilter = graph->inputs[i];
/* skip inputs whose file is stalled or already at EOF */
3834 if (input_files[ist->file_index]->eagain ||
3835 input_files[ist->file_index]->eof_reached)
3837 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3838 if (nb_requests > nb_requests_max) {
3839 nb_requests_max = nb_requests;
/* no feedable input found (condition elided): mark all of the graph's
 * outputs unavailable for this iteration */
3845 for (i = 0; i < graph->nb_outputs; i++)
3846 graph->outputs[i]->ost->unavailable = 1;
/**
 * Run a single step of transcoding.
 *
 * Chooses the output stream that most needs data, finds the input that
 * can feed it (directly or through its filter graph), processes one
 * input packet and reaps any resulting filtered frames.
 *
 * @return 0 for success, <0 for error
 */
3856 static int transcode_step(void)
3862 ost = choose_output();
/* no selectable output and nothing pending: we are done */
3869 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtered output: ask the graph which input stream to feed */
3874 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* unfiltered output: feed directly from its source input stream */
3879 av_assert0(ost->source_index >= 0);
3880 ist = input_streams[ost->source_index];
3883 ret = process_input(ist->file_index);
3884 if (ret == AVERROR(EAGAIN)) {
/* input temporarily starved: mark this output unavailable for now */
3885 if (input_files[ist->file_index]->eagain)
3886 ost->unavailable = 1;
/* EOF from process_input is not an error at this level */
3891 return ret == AVERROR_EOF ? 0 : ret;
3893 return reap_filters(0);
/*
 * The main loop of the file converter: initializes the transcode state
 * and input threads, repeatedly runs transcode_step() until all outputs
 * are finished or termination is requested, then flushes decoders,
 * writes trailers, closes codecs and frees per-stream resources.
 */
3899 static int transcode(void)
3902 AVFormatContext *os;
3905 int64_t timer_start;
3907 ret = transcode_init();
3911 if (stdin_interaction) {
3912 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3915 timer_start = av_gettime_relative();
/* demuxer threads are only built with pthreads support (#if elided) */
3918 if ((ret = init_input_threads()) < 0)
/* ---- main loop: one transcode step per iteration ---- */
3922 while (!received_sigterm) {
3923 int64_t cur_time= av_gettime_relative();
3925 /* if 'q' pressed, exits */
3926 if (stdin_interaction)
3927 if (check_keyboard_interaction(cur_time) < 0)
3930 /* check if there's any stream where output is still needed */
3931 if (!need_output()) {
3932 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3936 ret = transcode_step();
/* EOF/EAGAIN are expected loop-control results, not errors */
3938 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3942 av_strerror(ret, errbuf, sizeof(errbuf));
3944 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3949 /* dump report by using the output first video and audio streams */
3950 print_report(0, timer_start, cur_time);
3953 free_input_threads();
3956 /* at the end of stream, we must flush the decoder buffers */
3957 for (i = 0; i < nb_input_streams; i++) {
3958 ist = input_streams[i];
3959 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3960 process_input_packet(ist, NULL);
3967 /* write the trailer if needed and close file */
3968 for (i = 0; i < nb_output_files; i++) {
3969 os = output_files[i]->ctx;
3970 av_write_trailer(os);
3973 /* dump report by using the first video and audio streams */
3974 print_report(1, timer_start, av_gettime_relative());
3976 /* close each encoder */
3977 for (i = 0; i < nb_output_streams; i++) {
3978 ost = output_streams[i];
3979 if (ost->encoding_needed) {
3980 av_freep(&ost->enc_ctx->stats_in);
3984 /* close each decoder */
3985 for (i = 0; i < nb_input_streams; i++) {
3986 ist = input_streams[i];
3987 if (ist->decoding_needed) {
3988 avcodec_close(ist->dec_ctx);
3989 if (ist->hwaccel_uninit)
3990 ist->hwaccel_uninit(ist->dec_ctx);
/* error path also tears down the input threads (label elided) */
3999 free_input_threads();
/* free per-output-stream state: pass logs, options, side buffers */
4002 if (output_streams) {
4003 for (i = 0; i < nb_output_streams; i++) {
4004 ost = output_streams[i];
4007 fclose(ost->logfile);
4008 ost->logfile = NULL;
4010 av_freep(&ost->forced_kf_pts);
4011 av_freep(&ost->apad);
4012 av_freep(&ost->disposition);
4013 av_dict_free(&ost->encoder_opts);
4014 av_dict_free(&ost->swr_opts);
4015 av_dict_free(&ost->resample_opts);
4016 av_dict_free(&ost->bsf_args);
/*
 * Return the process's user CPU time in microseconds, via getrusage()
 * or GetProcessTimes() depending on the platform; falls back to
 * relative wall-clock time where neither is available.
 */
4024 static int64_t getutime(void)
4027 struct rusage rusage;
4029 getrusage(RUSAGE_SELF, &rusage);
4030 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4031 #elif HAVE_GETPROCESSTIMES
4033 FILETIME c, e, k, u;
4034 proc = GetCurrentProcess();
4035 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; /10 converts to microseconds */
4036 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4038 return av_gettime_relative();
/*
 * Return the process's peak memory usage in bytes (max RSS via
 * getrusage(), or PeakPagefileUsage via GetProcessMemoryInfo() on
 * Windows); the fallback return for other platforms is elided.
 */
4042 static int64_t getmaxrss(void)
4044 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4045 struct rusage rusage;
4046 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; convert to bytes */
4047 return (int64_t)rusage.ru_maxrss * 1024;
4048 #elif HAVE_GETPROCESSMEMORYINFO
4050 PROCESS_MEMORY_COUNTERS memcounters;
4051 proc = GetCurrentProcess();
4052 memcounters.cb = sizeof(memcounters);
4053 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4054 return memcounters.PeakPagefileUsage;
/* No-op av_log callback; installed in main() when running with -d
 * (daemon mode) to silence library logging. Body is empty. */
4060 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging, register all components, parse
 * the command line into input/output configurations, run transcode(),
 * report benchmark/decode-error statistics and exit via exit_program().
 */
4064 int main(int argc, char **argv)
/* ensure cleanup runs on every exit_program() path */
4069 register_exit(ffmpeg_cleanup);
4071 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4073 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4074 parse_loglevel(argc, argv, options);
/* -d as the first argument: run as daemon and silence all logging */
4076 if(argc>1 && !strcmp(argv[1], "-d")){
4078 av_log_set_callback(log_callback_null);
/* register every codec, device, filter and (de)muxer before parsing */
4083 avcodec_register_all();
4085 avdevice_register_all();
4087 avfilter_register_all();
4089 avformat_network_init();
4091 show_banner(argc, argv, options);
4095 /* parse options and open all input/output files */
4096 ret = ffmpeg_parse_options(argc, argv);
/* nothing to do at all: point the user at the help */
4100 if (nb_output_files <= 0 && nb_input_files == 0) {
4102 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4106 /* file converter / grab */
4107 if (nb_output_files <= 0) {
4108 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4112 // if (nb_input_files == 0) {
4113 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* time the whole conversion for the -benchmark report */
4117 current_time = ti = getutime();
4118 if (transcode() < 0)
4120 ti = getutime() - ti;
4122 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4124 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4125 decode_error_stat[0], decode_error_stat[1]);
/* fail the run if the decode error rate exceeds -max_error_rate */
4126 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by signal; otherwise the normal return code */
4129 exit_program(received_nb_signals ? 255 : main_return_code);
4130 return main_return_code;