2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
135 static uint8_t *subtitle_out;
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* NOTE(review): extraction holes here. This span holds remnants of TWO
 * functions: term_exit_sigsafe() — async-signal-safe restore of the saved
 * termios state (the tcsetattr line, presumably guarded by restore_tty and
 * HAVE_TERMIOS_H — TODO confirm) — and term_exit(), which silences logging
 * before calling it. Missing lines include braces and preprocessor guards;
 * left byte-identical. */
300 static void term_exit_sigsafe(void)
304         tcsetattr (0, TCSANOW, &oldtty);
310     av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping. These are written from signal handlers, hence
 * volatile; read by the main loop and decode_interrupt_cb(). */
static volatile int received_sigterm = 0;    /* last terminating signal number */
static volatile int received_nb_signals = 0; /* how many signals arrived */
static volatile int transcode_init_done = 0;
static volatile int ffmpeg_exited = 0;       /* set when cleanup finished */
static int main_return_code = 0;
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: route console events through the same
 * sigterm_handler() path used for POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            sleep(1);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* NOTE(review): heavily holed fragment of read_key(). The visible pieces
 * show a POSIX select()-based non-blocking stdin poll and a Windows
 * PeekNamedPipe branch for GUI/pipe stdin; the declarations, #if structure,
 * and return paths fall in extraction holes. Left byte-identical. */
400 /* read a key without blocking */
401 static int read_key(void)
413     n = select(1, &rfds, NULL, NULL, &tv);
422 #    if HAVE_PEEKNAMEDPIPE
424     static HANDLE input_handle;
427         input_handle = GetStdHandle(STD_INPUT_HANDLE);
428         is_pipe = !GetConsoleMode(input_handle, &dw);
432         /* When running under a GUI, you will end here. */
433         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434             // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* NOTE(review): extraction holes throughout this function (closing braces,
 * some conditions, the tail that flips ffmpeg_exited — TODO confirm against
 * upstream). Frees, in order: filtergraphs, the subtitle scratch buffer,
 * output files/streams (incl. bitstream filter chains), input threads,
 * input files/streams, then global tables; finally logs why we exited. */
459 static void ffmpeg_cleanup(int ret)
464     int maxrss = getmaxrss() / 1024;
465     av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* tear down every configured filter graph and its in/out bindings */
468     for (i = 0; i < nb_filtergraphs; i++) {
469         FilterGraph *fg = filtergraphs[i];
470         avfilter_graph_free(&fg->graph);
471         for (j = 0; j < fg->nb_inputs; j++) {
472             av_freep(&fg->inputs[j]->name);
473             av_freep(&fg->inputs[j]);
475         av_freep(&fg->inputs);
476         for (j = 0; j < fg->nb_outputs; j++) {
477             av_freep(&fg->outputs[j]->name);
478             av_freep(&fg->outputs[j]);
480         av_freep(&fg->outputs);
481         av_freep(&fg->graph_desc);
483         av_freep(&filtergraphs[i]);
485     av_freep(&filtergraphs);
487     av_freep(&subtitle_out);
/* close output files (avio close presumably in the hole at 497) */
490     for (i = 0; i < nb_output_files; i++) {
491         OutputFile *of = output_files[i];
496         if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498         avformat_free_context(s);
499         av_dict_free(&of->opts);
501         av_freep(&output_files[i]);
/* per-output-stream teardown, including the legacy bitstream filter chain */
503     for (i = 0; i < nb_output_streams; i++) {
504         OutputStream *ost = output_streams[i];
505         AVBitStreamFilterContext *bsfc;
510         bsfc = ost->bitstream_filters;
512             AVBitStreamFilterContext *next = bsfc->next;
513             av_bitstream_filter_close(bsfc);
516         ost->bitstream_filters = NULL;
517         av_frame_free(&ost->filtered_frame);
518         av_frame_free(&ost->last_frame);
520         av_parser_close(ost->parser);
522         av_freep(&ost->forced_keyframes);
523         av_expr_free(ost->forced_keyframes_pexpr);
524         av_freep(&ost->avfilter);
525         av_freep(&ost->logfile_prefix);
527         av_freep(&ost->audio_channels_map);
528         ost->audio_channels_mapped = 0;
530         avcodec_free_context(&ost->enc_ctx);
532         av_freep(&output_streams[i]);
535     free_input_threads();
537     for (i = 0; i < nb_input_files; i++) {
538         avformat_close_input(&input_files[i]->ctx);
539         av_freep(&input_files[i]);
541     for (i = 0; i < nb_input_streams; i++) {
542         InputStream *ist = input_streams[i];
544         av_frame_free(&ist->decoded_frame);
545         av_frame_free(&ist->filter_frame);
546         av_dict_free(&ist->decoder_opts);
547         avsubtitle_free(&ist->prev_sub.subtitle);
548         av_frame_free(&ist->sub2video.frame);
549         av_freep(&ist->filters);
550         av_freep(&ist->hwaccel_device);
552         avcodec_free_context(&ist->dec_ctx);
554         av_freep(&input_streams[i]);
559     av_freep(&vstats_filename);
561     av_freep(&input_streams);
562     av_freep(&input_files);
563     av_freep(&output_streams);
564     av_freep(&output_files);
568     avformat_network_deinit();
570     if (received_sigterm) {
571         av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
572                (int) received_sigterm);
573     } else if (ret && transcode_init_done) {
574         av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
589 void assert_avoptions(AVDictionary *m)
591 AVDictionaryEntry *t;
592 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
593 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
603 static void update_benchmark(const char *fmt, ...)
605 if (do_benchmark_all) {
606 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
614 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/* NOTE(review): extraction holes throughout (closing braces, the bsf loop
 * header, error-exit paths, some conditions). Sends one encoded/copied
 * packet to the muxer: copies extradata to the legacy stream codec context,
 * enforces -frames limits, extracts quality/error side data, runs the
 * legacy bitstream-filter chain, sanitizes DTS/PTS monotonicity, then
 * av_interleaved_write_frame(). Left byte-identical. */
629 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
631     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
632     AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* propagate encoder extradata to the muxer-visible (deprecated) codec ctx */
635     if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
636         ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
637         if (ost->st->codec->extradata) {
638             memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
639             ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
643     if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644         (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648      * Audio encoders may split the packets --  #frames in != #packets out.
649      * But there is no reordering, so we can limit the number of output packets
650      * by simply dropping them here.
651      * Counting encoded video frames needs to be done separately because of
652      * reordering, see do_video_out()
654     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
655         if (ost->frame_number >= ost->max_frames) {
656             av_packet_unref(pkt);
661     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
663         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665         ost->quality = sd ? AV_RL32(sd) : -1;
666         ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668         for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670                 ost->error[i] = AV_RL64(sd + 8 + 8*i);
675         if (ost->frame_rate.num && ost->is_cfr) {
676             if (pkt->duration > 0)
677                 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
678             pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* legacy bitstream filter chain (pre-AVBSFContext API) */
684     av_packet_split_side_data(pkt);
687         AVPacket new_pkt = *pkt;
688         AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
691         int a = av_bitstream_filter_filter(bsfc, avctx,
692                                            bsf_arg ? bsf_arg->value : NULL,
693                                            &new_pkt.data, &new_pkt.size,
694                                            pkt->data, pkt->size,
695                                            pkt->flags & AV_PKT_FLAG_KEY);
696         if(a == 0 && new_pkt.data != pkt->data) {
697             uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
699                 memcpy(t, new_pkt.data, new_pkt.size);
700                 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
708             pkt->side_data = NULL;
709             pkt->side_data_elems = 0;
710             av_packet_unref(pkt);
711             new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
712                                            av_buffer_default_free, NULL, 0);
717             av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
718                    bsfc->filter->name, pkt->stream_index,
719                    avctx->codec ? avctx->codec->name : "copy");
/* timestamp sanitation: dts<=pts and monotonically increasing dts */
729     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
730         if (pkt->dts != AV_NOPTS_VALUE &&
731             pkt->pts != AV_NOPTS_VALUE &&
732             pkt->dts > pkt->pts) {
733             av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
735                    ost->file_index, ost->st->index);
/* median-of-three guess keeps dts between pts and last_mux_dts+1 */
737             pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
738                      - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
739                      - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
742             (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
743             pkt->dts != AV_NOPTS_VALUE &&
744             ost->last_mux_dts != AV_NOPTS_VALUE) {
745             int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
746             if (pkt->dts < max) {
747                 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
748                 av_log(s, loglevel, "Non-monotonous DTS in output stream "
749                        "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
750                        ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
752                     av_log(NULL, AV_LOG_FATAL, "aborting.\n");
755                 av_log(s, loglevel, "changing to %"PRId64". This may result "
756                        "in incorrect timestamps in the output file.\n",
758                 if(pkt->pts >= pkt->dts)
759                     pkt->pts = FFMAX(pkt->pts, max);
764     ost->last_mux_dts = pkt->dts;
766     ost->data_size += pkt->size;
767     ost->packets_written++;
769     pkt->stream_index = ost->index;
772         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
773                 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
774                 av_get_media_type_string(ost->enc_ctx->codec_type),
775                 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
776                 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
781     ret = av_interleaved_write_frame(s, pkt);
783         print_error("av_interleaved_write_frame()", ret);
784         main_return_code = 1;
785         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
787     av_packet_unref(pkt);
790 static void close_output_stream(OutputStream *ost)
792 OutputFile *of = output_files[ost->file_index];
794 ost->finished |= ENCODER_FINISHED;
796 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
797 of->recording_time = FFMIN(of->recording_time, end);
801 static int check_recording_time(OutputStream *ost)
803 OutputFile *of = output_files[ost->file_index];
805 if (of->recording_time != INT64_MAX &&
806 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
807 AV_TIME_BASE_Q) >= 0) {
808 close_output_stream(ost);
/* NOTE(review): extraction holes (packet declaration, got_packet handling,
 * closing braces). Encodes one audio frame via the old avcodec_encode_audio2
 * API: fixes up the frame pts against sync_opts, encodes, rescales packet
 * timestamps to the stream time base, and hands off to write_frame().
 * Left byte-identical. */
814 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
817     AVCodecContext *enc = ost->enc_ctx;
821     av_init_packet(&pkt);
825     if (!check_recording_time(ost))
/* derive a pts from the running sample counter when none is provided */
828     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
829         frame->pts = ost->sync_opts;
830     ost->sync_opts = frame->pts + frame->nb_samples;
831     ost->samples_encoded += frame->nb_samples;
832     ost->frames_encoded++;
834     av_assert0(pkt.size || !pkt.data);
835     update_benchmark(NULL);
837         av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
838                "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
839                av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
840                enc->time_base.num, enc->time_base.den);
843     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
844         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
847     update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
850         av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
853             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
854                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
855                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
856                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
859         write_frame(s, &pkt, ost);
/* NOTE(review): extraction holes (parameter list tail, `nb` initialization,
 * pts setup around line 897-900, exit paths). Encodes one AVSubtitle:
 * DVB subtitles get encoded twice (draw + clear packet), timestamps are
 * shifted to honor -ss/-t, and each encoded packet goes to write_frame().
 * Left byte-identical. */
863 static void do_subtitle_out(AVFormatContext *s,
868     int subtitle_out_max_size = 1024 * 1024;
869     int subtitle_out_size, nb, i;
874     if (sub->pts == AV_NOPTS_VALUE) {
875         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared encode scratch buffer */
884         subtitle_out = av_malloc(subtitle_out_max_size);
886             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
891     /* Note: DVB subtitle need one packet to draw them and one other
892        packet to clear them */
893     /* XXX: signal it in the codec context ? */
894     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
899     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
901     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
902         pts -= output_files[ost->file_index]->start_time;
903     for (i = 0; i < nb; i++) {
904         unsigned save_num_rects = sub->num_rects;
906         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
907         if (!check_recording_time(ost))
911         // start_display_time is required to be 0
912         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
913         sub->end_display_time  -= sub->start_display_time;
914         sub->start_display_time = 0;
918         ost->frames_encoded++;
920         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
921                                                     subtitle_out_max_size, sub);
/* the second DVB pass clears num_rects; restore for the caller */
923         sub->num_rects = save_num_rects;
924         if (subtitle_out_size < 0) {
925             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
929         av_init_packet(&pkt);
930         pkt.data = subtitle_out;
931         pkt.size = subtitle_out_size;
932         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
933         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
934         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
935             /* XXX: the pts correction is handled here. Maybe handling
936                it in the codec would be better */
938                 pkt.pts += 90 * sub->start_display_time;
940                 pkt.pts += 90 * sub->end_display_time;
943         write_frame(s, &pkt, ost);
/* NOTE(review): large function with many extraction holes (signature tail,
 * packet/duration declarations, several case labels such as VSYNC_CFR/VFR,
 * break/return statements, closing braces). Implements video frame-rate
 * conversion: computes how many times to emit (dup) or whether to drop the
 * frame per the selected vsync mode, then encodes each copy via
 * avcodec_encode_video2 (or raw AVPicture pass-through under
 * FF_API_LAVF_FMT_RAWPICTURE), handling forced keyframes and two-pass
 * stats. Left byte-identical. */
947 static void do_video_out(AVFormatContext *s,
949                          AVFrame *next_picture,
952     int ret, format_video_sync;
954     AVCodecContext *enc = ost->enc_ctx;
955     AVCodecContext *mux_enc = ost->st->codec;
956     int nb_frames, nb0_frames, i;
957     double delta, delta0;
960     InputStream *ist = NULL;
961     AVFilterContext *filter = ost->filter->filter;
963     if (ost->source_index >= 0)
964         ist = input_streams[ost->source_index];
/* estimate the duration of one output frame in encoder time base units */
966     if (filter->inputs[0]->frame_rate.num > 0 &&
967         filter->inputs[0]->frame_rate.den > 0)
968         duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
970     if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
971         duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
973     if (!ost->filters_script &&
977         lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
978         duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* NULL next_picture path (flush): predict dup count from recent history */
983         nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
984                                           ost->last_nb0_frames[1],
985                                           ost->last_nb0_frames[2]);
987     delta0 = sync_ipts - ost->sync_opts;
988     delta  = delta0 + duration;
990     /* by default, we output a single frame */
994     format_video_sync = video_sync_method;
995     if (format_video_sync == VSYNC_AUTO) {
996         if(!strcmp(s->oformat->name, "avi")) {
997             format_video_sync = VSYNC_VFR;
999             format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1001             && format_video_sync == VSYNC_CFR
1002             && input_files[ist->file_index]->ctx->nb_streams == 1
1003             && input_files[ist->file_index]->input_ts_offset == 0) {
1004             format_video_sync = VSYNC_VSCFR;
1006         if (format_video_sync == VSYNC_CFR && copy_ts) {
1007             format_video_sync = VSYNC_VSCFR;
1010     ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1014         format_video_sync != VSYNC_PASSTHROUGH &&
1015         format_video_sync != VSYNC_DROP) {
1016         double cor = FFMIN(-delta0, duration);
1017         if (delta0 < -0.6) {
1018             av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1020             av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
1026     switch (format_video_sync) {
1028         if (ost->frame_number == 0 && delta - duration >= 0.5) {
1029             av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1032             ost->sync_opts = lrint(sync_ipts);
1035         // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1036         if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1038         } else if (delta < -1.1)
1040         else if (delta > 1.1) {
1041             nb_frames = lrintf(delta);
1043                 nb0_frames = lrintf(delta0 - 0.6);
1049         else if (delta > 0.6)
1050             ost->sync_opts = lrint(sync_ipts);
1053     case VSYNC_PASSTHROUGH:
1054         ost->sync_opts = lrint(sync_ipts);
1061     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1062     nb0_frames = FFMIN(nb0_frames, nb_frames);
/* slide the dup-count history window */
1064     memmove(ost->last_nb0_frames + 1,
1065             ost->last_nb0_frames,
1066             sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1067     ost->last_nb0_frames[0] = nb0_frames;
1069     if (nb0_frames == 0 && ost->last_droped) {
1071         av_log(NULL, AV_LOG_VERBOSE,
1072                "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1073                ost->frame_number, ost->st->index, ost->last_frame->pts);
1075     if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1076         if (nb_frames > dts_error_threshold * 30) {
1077             av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1081         nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1082         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1084     ost->last_droped = nb_frames == nb0_frames && next_picture;
1086     /* duplicates frame if needed */
1087     for (i = 0; i < nb_frames; i++) {
1088         AVFrame *in_picture;
1089         av_init_packet(&pkt);
1093         if (i < nb0_frames && ost->last_frame) {
1094             in_picture = ost->last_frame;
1096             in_picture = next_picture;
1101         in_picture->pts = ost->sync_opts;
1104         if (!check_recording_time(ost))
1106         if (ost->frame_number >= ost->max_frames)
1110 #if FF_API_LAVF_FMT_RAWPICTURE
1111         if (s->oformat->flags & AVFMT_RAWPICTURE &&
1112             enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1113             /* raw pictures are written as AVPicture structure to
1114                avoid any copies. We support temporarily the older
1116             if (in_picture->interlaced_frame)
1117                 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1119                 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1120             pkt.data   = (uint8_t *)in_picture;
1121             pkt.size   =  sizeof(AVPicture);
1122             pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1123             pkt.flags |= AV_PKT_FLAG_KEY;
1125             write_frame(s, &pkt, ost);
1129             int got_packet, forced_keyframe = 0;
1132             if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1133                 ost->top_field_first >= 0)
1134                 in_picture->top_field_first = !!ost->top_field_first;
1136             if (in_picture->interlaced_frame) {
1137                 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1138                     mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1140                     mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1142                 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1144             in_picture->quality = enc->global_quality;
1145             in_picture->pict_type = 0;
1147             pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1148                 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* forced keyframe selection: -force_key_frames list, expression, or "source" */
1149             if (ost->forced_kf_index < ost->forced_kf_count &&
1150                 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1151                 ost->forced_kf_index++;
1152                 forced_keyframe = 1;
1153             } else if (ost->forced_keyframes_pexpr) {
1155                 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1156                 res = av_expr_eval(ost->forced_keyframes_pexpr,
1157                                    ost->forced_keyframes_expr_const_values, NULL);
1158                 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1159                          ost->forced_keyframes_expr_const_values[FKF_N],
1160                          ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1161                          ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1162                          ost->forced_keyframes_expr_const_values[FKF_T],
1163                          ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1166                     forced_keyframe = 1;
1167                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1168                         ost->forced_keyframes_expr_const_values[FKF_N];
1169                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1170                         ost->forced_keyframes_expr_const_values[FKF_T];
1171                     ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1174                 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1175             } else if (   ost->forced_keyframes
1176                        && !strncmp(ost->forced_keyframes, "source", 6)
1177                        && in_picture->key_frame==1) {
1178                 forced_keyframe = 1;
1181             if (forced_keyframe) {
1182                 in_picture->pict_type = AV_PICTURE_TYPE_I;
1183                 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1186             update_benchmark(NULL);
1188                 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1189                        "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1190                        av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1191                        enc->time_base.num, enc->time_base.den);
1194             ost->frames_encoded++;
1196             ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1197             update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1199                 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1205                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1206                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1207                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1208                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1211                 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1212                     pkt.pts = ost->sync_opts;
1214                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1217                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1218                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1219                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1220                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1223                 frame_size = pkt.size;
1224                 write_frame(s, &pkt, ost);
1226                 /* if two pass, output log */
1227                 if (ost->logfile && enc->stats_out) {
1228                     fprintf(ost->logfile, "%s", enc->stats_out);
1234      * For video, number of frames in == number of packets out.
1235      * But there may be reordering, so we can't throw away frames on encoder
1236      * flush, we need to limit them here, before they go into encoder.
1238         ost->frame_number++;
1240         if (vstats_filename && frame_size)
1241             do_video_stats(ost, frame_size);
/* remember the last frame so it can be re-encoded when duplicating */
1244     if (!ost->last_frame)
1245         ost->last_frame = av_frame_alloc();
1246     av_frame_unref(ost->last_frame);
1247     if (next_picture && ost->last_frame)
1248         av_frame_ref(ost->last_frame, next_picture);
1250         av_frame_free(&ost->last_frame);
1253 static double psnr(double d)
1255 return -10.0 * log10(d);
/* NOTE(review): extraction holes (frame_number declaration, enc assignment,
 * the fopen error path, closing braces). Appends one line per encoded video
 * frame to the -vstats file: frame number, quantizer, optional PSNR,
 * frame/stream sizes, time and bitrates. Left byte-identical. */
1258 static void do_video_stats(OutputStream *ost, int frame_size)
1260     AVCodecContext *enc;
1262     double ti1, bitrate, avg_bitrate;
1264     /* this is executed just the first time do_video_stats is called */
1266         vstats_file = fopen(vstats_filename, "w");
1274     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1275         frame_number = ost->st->nb_frames;
1276         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1277                 ost->quality / (float)FF_QP2LAMBDA);
1279         if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1280             fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1282         fprintf(vstats_file,"f_size= %6d ", frame_size);
1283         /* compute pts value */
1284         ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1288         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1289         avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1290         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1291                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1292         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1296 static void finish_output_stream(OutputStream *ost)
1298 OutputFile *of = output_files[ost->file_index];
1301 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1304 for (i = 0; i < of->ctx->nb_streams; i++)
1305 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1310 * Get and encode new output from any of the filtergraphs, without causing
1313 * @return 0 for success, <0 for severe errors
/* Pull any already-filtered frames out of every output stream's buffersink
 * (without requesting new input: AV_BUFFERSINK_FLAG_NO_REQUEST) and hand
 * them to the video/audio encoders. When 'flush' is set, an EOF from a
 * video sink still triggers a final do_video_out() call to drain the
 * encoder path.
 * NOTE(review): sampled/mangled chunk -- 'i'/'ret' declarations, several
 * continue/break statements and closing braces are missing from view. */
1315 static int reap_filters(int flush)
1317 AVFrame *filtered_frame = NULL;
1320 /* Reap all buffers present in the buffer sinks */
1321 for (i = 0; i < nb_output_streams; i++) {
1322 OutputStream *ost = output_streams[i];
1323 OutputFile *of = output_files[ost->file_index];
1324 AVFilterContext *filter;
1325 AVCodecContext *enc = ost->enc_ctx;
1330 filter = ost->filter->filter;
/* lazily allocate the reusable frame that receives sink output */
1332 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1333 return AVERROR(ENOMEM);
1335 filtered_frame = ost->filtered_frame;
1338 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1339 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1340 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN (nothing buffered) and EOF are expected; anything else is logged */
1342 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1343 av_log(NULL, AV_LOG_WARNING,
1344 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1345 } else if (flush && ret == AVERROR_EOF) {
1346 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1347 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1351 if (ost->finished) {
1352 av_frame_unref(filtered_frame);
/* rescale the frame pts into the encoder time base; float_pts keeps extra
 * fractional precision by temporarily scaling the time base up */
1355 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1356 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1357 AVRational tb = enc->time_base;
1358 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1360 tb.den <<= extra_bits;
1362 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1363 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1364 float_pts /= 1 << extra_bits;
1365 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1366 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1368 filtered_frame->pts =
1369 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1370 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1372 //if (ost->source_index >= 0)
1373 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch to the media-type specific encode path */
1375 switch (filter->inputs[0]->type) {
1376 case AVMEDIA_TYPE_VIDEO:
1377 if (!ost->frame_aspect_ratio.num)
1378 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1381 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1382 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1384 enc->time_base.num, enc->time_base.den);
1387 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1389 case AVMEDIA_TYPE_AUDIO:
/* the graph should have normalized channel count to what the encoder expects */
1390 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1391 enc->channels != av_frame_get_channels(filtered_frame)) {
1392 av_log(NULL, AV_LOG_ERROR,
1393 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1396 do_audio_out(of->ctx, ost, filtered_frame);
1399 // TODO support subtitle filters
1403 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then per-input/per-output stream packet and frame
 * counts at VERBOSE level. Warns when nothing at all was encoded.
 * NOTE(review): sampled/mangled chunk -- 'i'/'j' declarations, some braces
 * and else-branches are missing from view. */
1410 static void print_final_stats(int64_t total_size)
1412 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1413 uint64_t subtitle_size = 0;
1414 uint64_t data_size = 0;
/* -1 means "overhead unknown" (printed as "unknown" below) */
1415 float percent = -1.0;
1419 for (i = 0; i < nb_output_streams; i++) {
1420 OutputStream *ost = output_streams[i];
1421 switch (ost->enc_ctx->codec_type) {
1422 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1423 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1424 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1425 default: other_size += ost->data_size; break;
1427 extra_size += ost->enc_ctx->extradata_size;
1428 data_size += ost->data_size;
/* NOTE(review): mixes new-style AV_CODEC_FLAG_PASS1 with deprecated
 * CODEC_FLAG_PASS2 -- presumably same bit value, but should use
 * AV_CODEC_FLAG_PASS2 for consistency; confirm against avcodec.h */
1429 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1430 != AV_CODEC_FLAG_PASS1)
1434 if (data_size && total_size>0 && total_size >= data_size)
1435 percent = 100.0 * (total_size - data_size) / data_size;
1437 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1438 video_size / 1024.0,
1439 audio_size / 1024.0,
1440 subtitle_size / 1024.0,
1441 other_size / 1024.0,
1442 extra_size / 1024.0);
1444 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1446 av_log(NULL, AV_LOG_INFO, "unknown");
1447 av_log(NULL, AV_LOG_INFO, "\n");
1449 /* print verbose per-stream stats */
1450 for (i = 0; i < nb_input_files; i++) {
1451 InputFile *f = input_files[i];
1452 uint64_t total_packets = 0, total_size = 0;
1454 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1455 i, f->ctx->filename);
1457 for (j = 0; j < f->nb_streams; j++) {
1458 InputStream *ist = input_streams[f->ist_index + j];
1459 enum AVMediaType type = ist->dec_ctx->codec_type;
1461 total_size += ist->data_size;
1462 total_packets += ist->nb_packets;
1464 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1465 i, j, media_type_string(type));
1466 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1467 ist->nb_packets, ist->data_size);
1469 if (ist->decoding_needed) {
1470 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1471 ist->frames_decoded);
1472 if (type == AVMEDIA_TYPE_AUDIO)
1473 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1474 av_log(NULL, AV_LOG_VERBOSE, "; ");
1477 av_log(NULL, AV_LOG_VERBOSE, "\n");
1480 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1481 total_packets, total_size);
/* symmetric per-output-file accounting */
1484 for (i = 0; i < nb_output_files; i++) {
1485 OutputFile *of = output_files[i];
1486 uint64_t total_packets = 0, total_size = 0;
1488 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1489 i, of->ctx->filename);
1491 for (j = 0; j < of->ctx->nb_streams; j++) {
1492 OutputStream *ost = output_streams[of->ost_index + j];
1493 enum AVMediaType type = ost->enc_ctx->codec_type;
1495 total_size += ost->data_size;
1496 total_packets += ost->packets_written;
1498 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1499 i, j, media_type_string(type));
1500 if (ost->encoding_needed) {
1501 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1502 ost->frames_encoded);
1503 if (type == AVMEDIA_TYPE_AUDIO)
1504 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1505 av_log(NULL, AV_LOG_VERBOSE, "; ");
1508 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1509 ost->packets_written, ost->data_size);
1511 av_log(NULL, AV_LOG_VERBOSE, "\n");
1514 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1515 total_packets, total_size);
1517 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1518 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1520 av_log(NULL, AV_LOG_WARNING, "\n");
1522 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (at most every 500ms) or final progress report: frame
 * count, fps, quantizer, optional QP histogram and PSNR, output size, time,
 * bitrate and dup/drop counters. The same data is also formatted into
 * buf_script and written to -progress (progress_avio) in key=value form.
 * On the last report, falls through to print_final_stats().
 * NOTE(review): sampled/mangled chunk -- declarations of buf/ost/q/qp/j/
 * total_size/bitrate, hours/mins computation, several braces and
 * early-return statements are missing from view. */
1527 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1530 AVBPrint buf_script;
1532 AVFormatContext *oc;
1534 AVCodecContext *enc;
1535 int frame_number, vid, i;
1537 int64_t pts = INT64_MIN + 1;
1538 static int64_t last_time = -1;
1539 static int qp_histogram[52];
1540 int hours, mins, secs, us;
/* nothing to do unless someone consumes the report */
1542 if (!print_stats && !is_last_report && !progress_avio)
/* rate-limit intermediate reports to one per 500ms of wall clock */
1545 if (!is_last_report) {
1546 if (last_time == -1) {
1547 last_time = cur_time;
1550 if ((cur_time - last_time) < 500000)
1552 last_time = cur_time;
1556 oc = output_files[0]->ctx;
1558 total_size = avio_size(oc->pb);
1559 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1560 total_size = avio_tell(oc->pb);
1564 av_bprint_init(&buf_script, 0, 1);
1565 for (i = 0; i < nb_output_streams; i++) {
1567 ost = output_streams[i];
1569 if (!ost->stream_copy)
1570 q = ost->quality / (float) FF_QP2LAMBDA;
/* only the first video stream drives the frame/fps fields */
1572 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1573 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1574 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1575 ost->file_index, ost->index, q);
1577 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1578 float fps, t = (cur_time-timer_start) / 1000000.0;
1580 frame_number = ost->frame_number;
1581 fps = t > 1 ? frame_number / t : 0;
/* 'fps < 9.95' selects one decimal place only for single-digit rates */
1582 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1583 frame_number, fps < 9.95, fps, q);
1584 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1585 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1586 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1587 ost->file_index, ost->index, q);
1589 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* log2-compressed QP histogram, printed as one hex digit per bucket */
1593 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1595 for (j = 0; j < 32; j++)
1596 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1599 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1601 double error, error_sum = 0;
1602 double scale, scale_sum = 0;
1604 char type[3] = { 'Y','U','V' };
1605 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1606 for (j = 0; j < 3; j++) {
/* last report: cumulative encoder error; otherwise per-frame error */
1607 if (is_last_report) {
1608 error = enc->error[j];
1609 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1611 error = ost->error[j];
1612 scale = enc->width * enc->height * 255.0 * 255.0;
1618 p = psnr(error / scale);
1619 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* 'type[j] | 32' lowercases the plane letter for the key name */
1620 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1621 ost->file_index, ost->index, type[j] | 32, p);
1623 p = psnr(error_sum / scale_sum);
1624 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1625 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1626 ost->file_index, ost->index, p);
1630 /* compute min output value */
1631 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1632 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1633 ost->st->time_base, AV_TIME_BASE_Q));
1635 nb_frames_drop += ost->last_droped;
1638 secs = FFABS(pts) / AV_TIME_BASE;
1639 us = FFABS(pts) % AV_TIME_BASE;
1645 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1647 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1649 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650 "size=%8.0fkB time=", total_size / 1024.0);
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1654 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1655 (100 * us) / AV_TIME_BASE);
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1659 av_bprintf(&buf_script, "bitrate=N/A\n");
1661 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1662 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1665 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1666 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1667 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1668 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1669 hours, mins, secs, us);
1671 if (nb_frames_dup || nb_frames_drop)
1672 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1673 nb_frames_dup, nb_frames_drop);
1674 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1675 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* carriage return keeps intermediate reports on one console line */
1677 if (print_stats || is_last_report) {
1678 const char end = is_last_report ? '\n' : '\r';
1679 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1680 fprintf(stderr, "%s %c", buf, end);
1682 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1687 if (progress_avio) {
1688 av_bprintf(&buf_script, "progress=%s\n",
1689 is_last_report ? "end" : "continue");
1690 avio_write(progress_avio, buf_script.str,
1691 FFMIN(buf_script.len, buf_script.size - 1));
1692 avio_flush(progress_avio);
1693 av_bprint_finalize(&buf_script, NULL);
1694 if (is_last_report) {
1695 avio_closep(&progress_avio);
1700 print_final_stats(total_size);
/* Drain every encoder at end of stream: repeatedly call the encode function
 * with a NULL frame until it stops producing packets, writing each delayed
 * packet to the muxer. Streams that are not encoded, raw-picture passthrough
 * and audio codecs without framing (frame_size <= 1) are skipped.
 * NOTE(review): sampled/mangled chunk -- declarations of i/ret/pkt/
 * got_packet/pkt_size/desc, the drain loop header, stop_encoding handling
 * and closing braces are missing from view. */
1703 static void flush_encoders(void)
1707 for (i = 0; i < nb_output_streams; i++) {
1708 OutputStream *ost = output_streams[i];
1709 AVCodecContext *enc = ost->enc_ctx;
1710 AVFormatContext *os = output_files[ost->file_index]->ctx;
1711 int stop_encoding = 0;
1713 if (!ost->encoding_needed)
/* unframed audio encoders have no delayed packets to flush */
1716 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1718 #if FF_API_LAVF_FMT_RAWPICTURE
1719 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the per-media-type encode entry point */
1724 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1727 switch (enc->codec_type) {
1728 case AVMEDIA_TYPE_AUDIO:
1729 encode = avcodec_encode_audio2;
1732 case AVMEDIA_TYPE_VIDEO:
1733 encode = avcodec_encode_video2;
1744 av_init_packet(&pkt);
1748 update_benchmark(NULL);
/* NULL frame signals the encoder to emit its delayed packets */
1749 ret = encode(enc, &pkt, NULL, &got_packet);
1750 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1752 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: keep appending encoder stats to the pass log */
1757 if (ost->logfile && enc->stats_out) {
1758 fprintf(ost->logfile, "%s", enc->stats_out);
1764 if (ost->finished & MUXER_FINISHED) {
1765 av_packet_unref(&pkt);
1768 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1769 pkt_size = pkt.size;
1770 write_frame(os, &pkt, ost);
1771 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1772 do_video_stats(ost, pkt_size);
1783 * Check whether a packet from ist should be written into ost at this time
/* Decide whether a packet from this input stream should be written into
 * this output stream right now: the output must be fed by exactly this
 * input, and the input pts must have reached the output file's start time.
 * NOTE(review): sampled/mangled chunk -- the 'return 0;' bodies of the
 * visible conditions and the final 'return 1;' are missing from view. */
1785 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1787 OutputFile *of = output_files[ost->file_index];
1788 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* only forward packets from the stream this output actually maps to */
1790 if (ost->source_index != ist_index)
/* drop packets before the requested output start time (-ss on output) */
1796 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * pts/dts/duration into the output stream time base (offset by the output
 * start time), honour -copy_initial_nonkeyframes / -copypriorss, close the
 * stream when the recording time limit is reached, optionally run
 * av_parser_change() for legacy bitstream fixups, then hand the packet to
 * write_frame().
 * NOTE(review): sampled/mangled chunk -- declarations of opkt/pict, several
 * return statements, else branches and closing braces are missing from
 * view. */
1802 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1804 OutputFile *of = output_files[ost->file_index];
1805 InputFile *f = input_files [ist->file_index];
1806 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1807 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1812 av_init_packet(&opkt);
/* by default, don't start a copied stream on a non-keyframe */
1814 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1815 !ost->copy_initial_nonkeyframes
/* drop packets that precede the output start time, unless -copypriorss */
1818 if (pkt->pts == AV_NOPTS_VALUE) {
1819 if (!ost->frame_number && ist->pts < start_time &&
1820 !ost->copy_prior_start)
1823 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824 !ost->copy_prior_start)
/* stop the stream once the output recording time (-t) is exhausted */
1828 if (of->recording_time != INT64_MAX &&
1829 ist->pts >= of->recording_time + start_time) {
1830 close_output_stream(ost);
1834 if (f->recording_time != INT64_MAX) {
1835 start_time = f->ctx->start_time;
1836 if (f->start_time != AV_NOPTS_VALUE)
1837 start_time += f->start_time;
1838 if (ist->pts >= f->recording_time + start_time) {
1839 close_output_stream(ost);
1844 /* force the input stream PTS */
1845 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale timestamps into the output stream time base */
1848 if (pkt->pts != AV_NOPTS_VALUE)
1849 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1851 opkt.pts = AV_NOPTS_VALUE;
1853 if (pkt->dts == AV_NOPTS_VALUE)
1854 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1856 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration */
1859 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1860 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1862 duration = ist->dec_ctx->frame_size;
1863 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865 ost->st->time_base) - ost_tb_start_time;
1868 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869 opkt.flags = pkt->flags;
1870 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1871 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1872 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1873 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1874 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1876 int ret = av_parser_change(ost->parser, ost->st->codec,
1877 &opkt.data, &opkt.size,
1878 pkt->data, pkt->size,
1879 pkt->flags & AV_PKT_FLAG_KEY);
1881 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser may have allocated a new buffer; give opkt ownership of it */
1886 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1891 opkt.data = pkt->data;
1892 opkt.size = pkt->size;
1894 av_copy_packet_side_data(&opkt, pkt);
1896 #if FF_API_LAVF_FMT_RAWPICTURE
1897 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1898 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1899 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1900 /* store AVPicture in AVPacket, as expected by the output format */
1901 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1903 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1907 opkt.data = (uint8_t *)&pict;
1908 opkt.size = sizeof(AVPicture);
1909 opkt.flags |= AV_PKT_FLAG_KEY;
1913 write_frame(of->ctx, &opkt, ost);
1916 int guess_input_channel_layout(InputStream *ist)
1918 AVCodecContext *dec = ist->dec_ctx;
1920 if (!dec->channel_layout) {
1921 char layout_name[256];
1923 if (dec->channels > ist->guess_layout_max)
1925 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1926 if (!dec->channel_layout)
1928 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1929 dec->channels, dec->channel_layout);
1930 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1931 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1936 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1938 if (*got_output || ret<0)
1939 decode_error_stat[ret<0] ++;
1941 if (ret < 0 && exit_on_error)
1944 if (exit_on_error && *got_output && ist) {
1945 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1946 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet: run avcodec_decode_audio4(), update per-stream
 * statistics and predicted next pts/dts, reconfigure the filtergraphs if the
 * sample format/rate/channel layout changed mid-stream, choose the best
 * available pts source for the decoded frame, and push the frame into every
 * filter input fed by this stream.
 * NOTE(review): sampled/mangled chunk -- several error-handling branches,
 * exit_program calls and closing braces are missing from view. */
1952 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1954 AVFrame *decoded_frame, *f;
1955 AVCodecContext *avctx = ist->dec_ctx;
1956 int i, ret, err = 0, resample_changed;
1957 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1959 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1960 return AVERROR(ENOMEM);
1961 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1962 return AVERROR(ENOMEM);
1963 decoded_frame = ist->decoded_frame;
1965 update_benchmark(NULL);
1966 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1967 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1969 if (ret >= 0 && avctx->sample_rate <= 0) {
1970 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1971 ret = AVERROR_INVALIDDATA;
1974 check_decode_result(ist, got_output, ret);
1976 if (!*got_output || ret < 0)
1979 ist->samples_decoded += decoded_frame->nb_samples;
1980 ist->frames_decoded++;
1983 /* increment next_dts to use for the case where the input stream does not
1984 have timestamps or there are multiple frames in the packet */
1985 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1987 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream audio parameter changes that require filter reinit */
1991 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1992 ist->resample_channels != avctx->channels ||
1993 ist->resample_channel_layout != decoded_frame->channel_layout ||
1994 ist->resample_sample_rate != decoded_frame->sample_rate;
1995 if (resample_changed) {
1996 char layout1[64], layout2[64];
1998 if (!guess_input_channel_layout(ist)) {
1999 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2000 "layout for Input Stream #%d.%d\n", ist->file_index,
2004 decoded_frame->channel_layout = avctx->channel_layout;
2006 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2007 ist->resample_channel_layout);
2008 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2009 decoded_frame->channel_layout);
2011 av_log(NULL, AV_LOG_INFO,
2012 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2013 ist->file_index, ist->st->index,
2014 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2015 ist->resample_channels, layout1,
2016 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2017 avctx->channels, layout2);
/* remember the new parameters, then rebuild the affected filtergraphs */
2019 ist->resample_sample_fmt = decoded_frame->format;
2020 ist->resample_sample_rate = decoded_frame->sample_rate;
2021 ist->resample_channel_layout = decoded_frame->channel_layout;
2022 ist->resample_channels = avctx->channels;
2024 for (i = 0; i < nb_filtergraphs; i++)
2025 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2026 FilterGraph *fg = filtergraphs[i];
2027 if (configure_filtergraph(fg) < 0) {
2028 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2034 /* if the decoder provides a pts, use it instead of the last packet pts.
2035 the decoder could be delaying output by a packet or more. */
2036 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2037 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2038 decoded_frame_tb = avctx->time_base;
2039 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2040 decoded_frame->pts = decoded_frame->pkt_pts;
2041 decoded_frame_tb = ist->st->time_base;
2042 } else if (pkt->pts != AV_NOPTS_VALUE) {
2043 decoded_frame->pts = pkt->pts;
2044 decoded_frame_tb = ist->st->time_base;
2046 decoded_frame->pts = ist->dts;
2047 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so multi-frame packets don't reuse it */
2049 pkt->pts = AV_NOPTS_VALUE;
2050 if (decoded_frame->pts != AV_NOPTS_VALUE)
2051 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2052 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2053 (AVRational){1, avctx->sample_rate});
2054 ist->nb_samples = decoded_frame->nb_samples;
/* feed every filter input; ref for all but the last to avoid a copy */
2055 for (i = 0; i < ist->nb_filters; i++) {
2056 if (i < ist->nb_filters - 1) {
2057 f = ist->filter_frame;
2058 err = av_frame_ref(f, decoded_frame);
2063 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2064 AV_BUFFERSRC_FLAG_PUSH);
2065 if (err == AVERROR_EOF)
2066 err = 0; /* ignore */
2070 decoded_frame->pts = AV_NOPTS_VALUE;
2072 av_frame_unref(ist->filter_frame);
2073 av_frame_unref(decoded_frame);
2074 return err < 0 ? err : ret;
/* Decode one video packet: run avcodec_decode_video2(), propagate
 * has_b_frames fixups, apply -top_field_first, retrieve hwaccel frames into
 * system memory, pick the best-effort timestamp, reconfigure filtergraphs on
 * mid-stream size/format changes, and push the frame into every filter input
 * fed by this stream.
 * NOTE(review): sampled/mangled chunk -- several error-handling branches,
 * exit_program calls and closing braces are missing from view. */
2077 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2079 AVFrame *decoded_frame, *f;
2080 int i, ret = 0, err = 0, resample_changed;
2081 int64_t best_effort_timestamp;
2082 AVRational *frame_sample_aspect;
2084 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2085 return AVERROR(ENOMEM);
2086 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2087 return AVERROR(ENOMEM);
2088 decoded_frame = ist->decoded_frame;
2089 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2091 update_benchmark(NULL);
2092 ret = avcodec_decode_video2(ist->dec_ctx,
2093 decoded_frame, got_output, pkt);
2094 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2096 // The following line may be required in some cases where there is no parser
2097 // or the parser does not set has_b_frames correctly
2098 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2099 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2100 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2102 av_log(ist->dec_ctx, AV_LOG_WARNING,
2103 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2104 "If you want to help, upload a sample "
2105 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2106 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2107 ist->dec_ctx->has_b_frames,
2108 ist->st->codec->has_b_frames);
2111 check_decode_result(ist, got_output, ret);
/* debug aid: decoder context and emitted frame should agree on geometry */
2113 if (*got_output && ret >= 0) {
2114 if (ist->dec_ctx->width != decoded_frame->width ||
2115 ist->dec_ctx->height != decoded_frame->height ||
2116 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2117 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2118 decoded_frame->width,
2119 decoded_frame->height,
2120 decoded_frame->format,
2121 ist->dec_ctx->width,
2122 ist->dec_ctx->height,
2123 ist->dec_ctx->pix_fmt);
2127 if (!*got_output || ret < 0)
/* honour the -top_field_first override if the user supplied one */
2130 if(ist->top_field_first>=0)
2131 decoded_frame->top_field_first = ist->top_field_first;
2133 ist->frames_decoded++;
/* copy hwaccel surface data back into a software frame when needed */
2135 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2136 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2140 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2142 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2143 if(best_effort_timestamp != AV_NOPTS_VALUE)
2144 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2147 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2148 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2149 ist->st->index, av_ts2str(decoded_frame->pts),
2150 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2151 best_effort_timestamp,
2152 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2153 decoded_frame->key_frame, decoded_frame->pict_type,
2154 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the frame's own value */
2159 if (ist->st->sample_aspect_ratio.num)
2160 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/pixel-format changes that require filter reinit */
2162 resample_changed = ist->resample_width != decoded_frame->width ||
2163 ist->resample_height != decoded_frame->height ||
2164 ist->resample_pix_fmt != decoded_frame->format;
2165 if (resample_changed) {
2166 av_log(NULL, AV_LOG_INFO,
2167 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2168 ist->file_index, ist->st->index,
2169 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2170 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2172 ist->resample_width = decoded_frame->width;
2173 ist->resample_height = decoded_frame->height;
2174 ist->resample_pix_fmt = decoded_frame->format;
2176 for (i = 0; i < nb_filtergraphs; i++) {
2177 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2178 configure_filtergraph(filtergraphs[i]) < 0) {
2179 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2185 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* feed every filter input; ref for all but the last to avoid a copy */
2186 for (i = 0; i < ist->nb_filters; i++) {
2187 if (!frame_sample_aspect->num)
2188 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2190 if (i < ist->nb_filters - 1) {
2191 f = ist->filter_frame;
2192 err = av_frame_ref(f, decoded_frame);
2197 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2198 if (ret == AVERROR_EOF) {
2199 ret = 0; /* ignore */
2200 } else if (ret < 0) {
2201 av_log(NULL, AV_LOG_FATAL,
2202 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2208 av_frame_unref(ist->filter_frame);
2209 av_frame_unref(decoded_frame);
2210 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * display duration (-fix_sub_duration, implemented by buffering one subtitle
 * and swapping it with the new one), update the sub2video rendering, and
 * encode the subtitle into every matching output stream.
 * NOTE(review): sampled/mangled chunk -- the 'end' declaration, several
 * return/goto paths and closing braces are missing from view. */
2213 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2215 AVSubtitle subtitle;
2216 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2217 &subtitle, got_output, pkt);
2219 check_decode_result(NULL, got_output, ret);
2221 if (ret < 0 || !*got_output) {
2223 sub2video_flush(ist);
/* -fix_sub_duration: shorten the buffered previous subtitle so it ends
 * when the new one starts */
2227 if (ist->fix_sub_duration) {
2229 if (ist->prev_sub.got_output) {
2230 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2231 1000, AV_TIME_BASE);
2232 if (end < ist->prev_sub.subtitle.end_display_time) {
2233 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2234 "Subtitle duration reduced from %d to %d%s\n",
2235 ist->prev_sub.subtitle.end_display_time, end,
2236 end <= 0 ? ", dropping it" : "");
2237 ist->prev_sub.subtitle.end_display_time = end;
/* emit the buffered previous subtitle, keep the new one for next time */
2240 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2241 FFSWAP(int, ret, ist->prev_sub.ret);
2242 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2250 sub2video_update(ist, &subtitle);
2252 if (!subtitle.num_rects)
2255 ist->frames_decoded++;
2257 for (i = 0; i < nb_output_streams; i++) {
2258 OutputStream *ost = output_streams[i];
2260 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2261 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2264 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2268 avsubtitle_free(&subtitle);
2272 static int send_filter_eof(InputStream *ist)
2275 for (i = 0; i < ist->nb_filters; i++) {
2276 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2283 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2284 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2290 if (!ist->saw_first_ts) {
2291 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2293 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2294 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2295 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2297 ist->saw_first_ts = 1;
2300 if (ist->next_dts == AV_NOPTS_VALUE)
2301 ist->next_dts = ist->dts;
2302 if (ist->next_pts == AV_NOPTS_VALUE)
2303 ist->next_pts = ist->pts;
2307 av_init_packet(&avpkt);
2315 if (pkt->dts != AV_NOPTS_VALUE) {
2316 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2317 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2318 ist->next_pts = ist->pts = ist->dts;
2321 // while we have more to decode or while the decoder did output something on EOF
2322 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2326 ist->pts = ist->next_pts;
2327 ist->dts = ist->next_dts;
2329 if (avpkt.size && avpkt.size != pkt->size &&
2330 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2331 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2332 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2333 ist->showed_multi_packet_warning = 1;
2336 switch (ist->dec_ctx->codec_type) {
2337 case AVMEDIA_TYPE_AUDIO:
2338 ret = decode_audio (ist, &avpkt, &got_output);
2340 case AVMEDIA_TYPE_VIDEO:
2341 ret = decode_video (ist, &avpkt, &got_output);
2342 if (avpkt.duration) {
2343 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2344 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2345 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2346 duration = ((int64_t)AV_TIME_BASE *
2347 ist->dec_ctx->framerate.den * ticks) /
2348 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2352 if(ist->dts != AV_NOPTS_VALUE && duration) {
2353 ist->next_dts += duration;
2355 ist->next_dts = AV_NOPTS_VALUE;
2358 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2360 case AVMEDIA_TYPE_SUBTITLE:
2361 ret = transcode_subtitles(ist, &avpkt, &got_output);
2368 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2369 ist->file_index, ist->st->index, av_err2str(ret));
2376 avpkt.pts= AV_NOPTS_VALUE;
2378 // touch data and size only if not EOF
2380 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2388 if (got_output && !pkt)
2392 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2393 /* except when looping we need to flush but not to send an EOF */
2394 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2395 int ret = send_filter_eof(ist);
2397 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2402 /* handle stream copy */
2403 if (!ist->decoding_needed) {
2404 ist->dts = ist->next_dts;
2405 switch (ist->dec_ctx->codec_type) {
2406 case AVMEDIA_TYPE_AUDIO:
2407 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2408 ist->dec_ctx->sample_rate;
2410 case AVMEDIA_TYPE_VIDEO:
2411 if (ist->framerate.num) {
2412 // TODO: Remove work-around for c99-to-c89 issue 7
2413 AVRational time_base_q = AV_TIME_BASE_Q;
2414 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2415 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2416 } else if (pkt->duration) {
2417 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2418 } else if(ist->dec_ctx->framerate.num != 0) {
2419 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2420 ist->next_dts += ((int64_t)AV_TIME_BASE *
2421 ist->dec_ctx->framerate.den * ticks) /
2422 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2426 ist->pts = ist->dts;
2427 ist->next_pts = ist->next_dts;
2429 for (i = 0; pkt && i < nb_output_streams; i++) {
2430 OutputStream *ost = output_streams[i];
2432 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2435 do_streamcopy(ist, ost, pkt);
/* Build an SDP description covering every RTP output muxer and either print
 * it to stdout or write it to the file given by -sdp_file.
 * NOTE(review): several lines (declarations, allocation-failure check, loop
 * closing braces) are elided from this view; comments only, code untouched. */
2441 static void print_sdp(void)
2446 AVIOContext *sdp_pb;
2447 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the output contexts whose muxer is "rtp"; j counts them. */
2451 for (i = 0, j = 0; i < nb_output_files; i++) {
2452 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2453 avc[j] = output_files[i]->ctx;
2461 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file option: dump the SDP text to stdout. */
2463 if (!sdp_filename) {
2464 printf("SDP:\n%s\n", sdp);
/* Otherwise write it to the requested file through the avio layer,
 * honoring the global interrupt callback so Ctrl-C can abort the open. */
2467 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2468 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2470 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2471 avio_closep(&sdp_pb);
/* Free and NULL the filename so the SDP is emitted only once. */
2472 av_freep(&sdp_filename);
/* Return the entry of the compile-time hwaccels[] table whose pixel format
 * matches pix_fmt, or fall through past the loop (the NULL return on no
 * match is elided from this view). The table is terminated by a NULL name. */
2480 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2483 for (i = 0; hwaccels[i].name; i++)
2484 if (hwaccels[i].pix_fmt == pix_fmt)
2485 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hardware-accelerated one whose HWAccel can be
 * initialized and matches the user's -hwaccel selection. Non-hwaccel
 * formats and the final fallback return are elided from this view. */
2489 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2491 InputStream *ist = s->opaque;
2492 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2495 for (p = pix_fmts; *p != -1; p++) {
2496 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2497 const HWAccel *hwaccel;
/* Skip plain software formats; only HWACCEL-flagged ones are candidates. */
2499 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2502 hwaccel = get_hwaccel(*p);
/* Reject the candidate if another hwaccel is already active for this
 * stream, or the user explicitly requested a different one. */
2504 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2505 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2508 ret = hwaccel->init(s);
/* Init failed: fatal only when the user asked for this hwaccel by name;
 * with -hwaccel auto we silently try the next candidate instead. */
2510 if (ist->hwaccel_id == hwaccel->id) {
2511 av_log(NULL, AV_LOG_FATAL,
2512 "%s hwaccel requested for input stream #%d:%d, "
2513 "but cannot be initialized.\n", hwaccel->name,
2514 ist->file_index, ist->st->index);
2515 return AV_PIX_FMT_NONE;
/* Success: remember which hwaccel/pix_fmt is now live on this stream. */
2519 ist->active_hwaccel_id = hwaccel->id;
2520 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: route frame allocation to the active
 * hwaccel's buffer allocator when the frame uses the hwaccel pixel format,
 * otherwise defer to libavcodec's default allocator. */
2527 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2529 InputStream *ist = s->opaque;
2531 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2532 return ist->hwaccel_get_buffer(s, frame, flags);
2534 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream ist_index if decoding is needed.
 * On failure, a human-readable message is written into error[error_len]
 * and a negative AVERROR code is returned (some error paths and closing
 * braces are elided from this view). */
2537 static int init_input_stream(int ist_index, char *error, int error_len)
2540 InputStream *ist = input_streams[ist_index];
2542 if (ist->decoding_needed) {
2543 AVCodec *codec = ist->dec;
/* No decoder was found for this codec id (the `if (!codec)` guard is
 * elided just above this message in the original file). */
2545 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2546 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2547 return AVERROR(EINVAL);
/* Install our callbacks so decode can negotiate hwaccel formats and
 * buffers; opaque lets the callbacks recover the InputStream. */
2550 ist->dec_ctx->opaque = ist;
2551 ist->dec_ctx->get_format = get_format;
2552 ist->dec_ctx->get_buffer2 = get_buffer;
2553 ist->dec_ctx->thread_safe_callbacks = 1;
/* ffmpeg.c always works with refcounted frames. */
2555 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles going to an output need end-display-time computation;
 * mixing that with filtering is only partially supported. */
2556 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2557 (ist->decoding_needed & DECODING_FOR_OST)) {
2558 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2559 if (ist->decoding_needed & DECODING_FOR_FILTER)
2560 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user set one. */
2563 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2564 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2565 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2566 if (ret == AVERROR_EXPERIMENTAL)
2567 abort_codec_experimental(codec, 0);
2569 snprintf(error, error_len,
2570 "Error while opening decoder for input stream "
2572 ist->file_index, ist->st->index, av_err2str(ret));
/* Abort if any decoder option was left unconsumed (typo detection). */
2575 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2578 ist->next_pts = AV_NOPTS_VALUE;
2579 ist->next_dts = AV_NOPTS_VALUE;
/* Return the InputStream feeding this output stream, or fall through when
 * the stream has no direct source (e.g. fed by a complex filtergraph;
 * the NULL return is elided from this view). */
2584 static InputStream *get_input_stream(OutputStream *ost)
2586 if (ost->source_index >= 0)
2587 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values; FFDIFFSIGN avoids the overflow a
 * plain subtraction of two int64_t could produce. */
2591 static int compare_int64(const void *a, const void *b)
2593 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Open the encoder for an output stream (when encoding is needed) and sync
 * the legacy ost->st->codec context with ost->enc_ctx. On failure writes a
 * message into error[error_len] and returns a negative AVERROR code.
 * NOTE(review): several lines (goto/return statements, closing braces) are
 * elided from this view; comments only, code untouched. */
2596 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2600 if (ost->encoding_needed) {
2601 AVCodec *codec = ost->enc;
2602 AVCodecContext *dec = NULL;
2605 if ((ist = get_input_stream(ost)))
/* Propagate the ASS subtitle header from decoder to encoder; the ASS
 * code assumes NUL termination, hence the extra byte. */
2607 if (dec && dec->subtitle_header) {
2608 /* ASS code assumes this buffer is null terminated so add extra byte. */
2609 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2610 if (!ost->enc_ctx->subtitle_header)
2611 return AVERROR(ENOMEM);
2612 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2613 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread count unless the user set one. */
2615 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2616 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when the user gave none. */
2617 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2619 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2620 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2621 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2623 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2624 if (ret == AVERROR_EXPERIMENTAL)
2625 abort_codec_experimental(codec, 1);
2626 snprintf(error, error_len,
2627 "Error while opening encoder for output stream #%d:%d - "
2628 "maybe incorrect parameters such as bit_rate, rate, width or height",
2629 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders require the buffersink to deliver
 * exactly frame_size samples per frame. */
2632 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2633 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2634 av_buffersink_set_frame_size(ost->filter->filter,
2635 ost->enc_ctx->frame_size);
2636 assert_avoptions(ost->encoder_opts);
2637 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2638 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2639 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the opened encoder context into the deprecated AVStream.codec
 * so the muxer sees correct parameters. */
2641 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2643 av_log(NULL, AV_LOG_FATAL,
2644 "Error initializing the output stream codec context.\n");
2648 // copy timebase while removing common factors
2649 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2650 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path (else branch in the original): apply remaining codec
 * options directly since no encoder consumed them. */
2652 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2654 av_log(NULL, AV_LOG_FATAL,
2655 "Error setting up codec context options.\n");
2658 // copy timebase while removing common factors
2659 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse the -force_key_frames value `kf` (comma-separated times, optionally
 * the keyword "chapters[+offset]") into a sorted array of PTS values stored
 * in ost->forced_kf_pts / ost->forced_kf_count, expressed in avctx's
 * time_base. Exits fatally on allocation failure. Some lines (comma
 * counting body, pointer advance, closing braces) are elided here. */
2665 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2666 AVCodecContext *avctx)
2669 int n = 1, i, size, index = 0;
/* First pass: count commas to size the pts array (n entries). */
2672 for (p = kf; *p; p++)
2676 pts = av_malloc_array(size, sizeof(*pts));
2678 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated token. */
2683 for (i = 0; i < n; i++) {
2684 char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at every chapter start. */
2689 if (!memcmp(p, "chapters", 8)) {
2691 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1) with overflow protection. */
2694 if (avf->nb_chapters > INT_MAX - size ||
2695 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2697 av_log(NULL, AV_LOG_FATAL,
2698 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", applied to every chapter time. */
2701 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2702 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2704 for (j = 0; j < avf->nb_chapters; j++) {
2705 AVChapter *c = avf->chapters[j];
2706 av_assert1(index < size);
2707 pts[index++] = av_rescale_q(c->start, c->time_base,
2708 avctx->time_base) + t;
/* Plain timestamp token. */
2713 t = parse_time_or_die("force_key_frames", p, 1);
2714 av_assert1(index < size);
2715 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder loop can scan it linearly. */
2722 av_assert0(index == size);
2723 qsort(pts, size, sizeof(*pts), compare_int64);
2724 ost->forced_kf_count = size;
2725 ost->forced_kf_pts = pts;
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after the input was opened; nb_streams_warn tracks the highest stream
 * index already reported so each new stream is logged only once. */
2728 static void report_new_stream(int input_index, AVPacket *pkt)
2730 InputFile *file = input_files[input_index];
2731 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream index (or it existed at open time). */
2733 if (pkt->stream_index < file->nb_streams_warn)
2735 av_log(file->ctx, AV_LOG_WARNING,
2736 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2737 av_get_media_type_string(st->codec->codec_type),
2738 input_index, pkt->stream_index,
2739 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2740 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on the output stream to the libavcodec
 * ident plus the encoder name, unless the user already provided one or
 * bitexact mode is requested (in which case only "Lavc <name>" is used so
 * output stays reproducible across versions). */
2743 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2745 AVDictionaryEntry *e;
2747 uint8_t *encoder_string;
2748 int encoder_string_len;
2749 int format_flags = 0;
2750 int codec_flags = 0;
/* Respect a user-supplied encoder tag. */
2752 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags to learn whether the muxer is in bitexact mode. */
2755 e = av_dict_get(of->opts, "fflags", NULL, 0);
2757 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2760 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Likewise for the codec-level -flags option. */
2762 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2764 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2767 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* sizeof(LIBAVCODEC_IDENT) includes the NUL; +2 covers separator slack. */
2770 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2771 encoder_string = av_mallocz(encoder_string_len);
2772 if (!encoder_string)
2775 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2776 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2778 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2779 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary. */
2780 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2781 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time transcode setup: bind complex-filtergraph outputs to source
 * streams, compute encoding/stream-copy parameters for every output stream,
 * open all encoders and decoders, discard unused programs, write output
 * file headers, and print the stream mapping. Returns 0 on success or a
 * negative AVERROR code. NOTE(review): many lines (braces, `continue`s,
 * `goto dump_format`, error paths) are elided from this view; comments
 * only, code untouched. */
2784 static int transcode_init(void)
2786 int ret = 0, i, j, k;
2787 AVFormatContext *oc;
2790 char error[1024] = {0};
/* Complex filtergraph outputs have no -map source; borrow the source
 * index from the (single) input of their graph so downstream code that
 * expects a source stream keeps working. */
2793 for (i = 0; i < nb_filtergraphs; i++) {
2794 FilterGraph *fg = filtergraphs[i];
2795 for (j = 0; j < fg->nb_outputs; j++) {
2796 OutputFilter *ofilter = fg->outputs[j];
2797 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2799 if (fg->nb_inputs != 1)
2801 for (k = nb_input_streams-1; k >= 0 ; k--)
2802 if (fg->inputs[0]->ist == input_streams[k])
2804 ofilter->ost->source_index = k;
2808 /* init framerate emulation */
2809 for (i = 0; i < nb_input_files; i++) {
2810 InputFile *ifile = input_files[i];
2811 if (ifile->rate_emu)
2812 for (j = 0; j < ifile->nb_streams; j++)
2813 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2816 /* for each output stream, we compute the right encoding parameters */
2817 for (i = 0; i < nb_output_streams; i++) {
2818 AVCodecContext *enc_ctx;
2819 AVCodecContext *dec_ctx = NULL;
2820 ost = output_streams[i];
2821 oc = output_files[ost->file_index]->ctx;
2822 ist = get_input_stream(ost);
2824 if (ost->attachment_filename)
/* Stream copy writes straight into the muxer's codec context;
 * encoding uses the separate encoder context. */
2827 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2830 dec_ctx = ist->dec_ctx;
2832 ost->st->disposition = ist->st->disposition;
2833 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2834 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output,
 * mark it as the default disposition. */
2836 for (j=0; j<oc->nb_streams; j++) {
2837 AVStream *st = oc->streams[j];
2838 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2841 if (j == oc->nb_streams)
2842 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2843 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---------- stream copy path ---------- */
2846 if (ost->stream_copy) {
2848 uint64_t extra_size;
2850 av_assert0(ist && !ost->filter);
/* Padding bytes keep bitstream readers from reading past the end. */
2852 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2854 if (extra_size > INT_MAX) {
2855 return AVERROR(EINVAL);
2858 /* if stream_copy is selected, no need to decode or encode */
2859 enc_ctx->codec_id = dec_ctx->codec_id;
2860 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container either has
 * no tag table, maps the same tag to this codec id, or has no tag of
 * its own for the codec. */
2862 if (!enc_ctx->codec_tag) {
2863 unsigned int codec_tag;
2864 if (!oc->oformat->codec_tag ||
2865 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2866 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2867 enc_ctx->codec_tag = dec_ctx->codec_tag;
2870 enc_ctx->bit_rate = dec_ctx->bit_rate;
2871 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2872 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2873 enc_ctx->field_order = dec_ctx->field_order;
2874 if (dec_ctx->extradata_size) {
2875 enc_ctx->extradata = av_mallocz(extra_size);
2876 if (!enc_ctx->extradata) {
2877 return AVERROR(ENOMEM);
2879 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2881 enc_ctx->extradata_size= dec_ctx->extradata_size;
2882 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2884 enc_ctx->time_base = ist->st->time_base;
/* Timebase heuristics: AVI stores fps in the timebase, so a too-fine
 * timebase bloats the file; reconstruct a sane one from r_frame_rate
 * or the decoder timebase when copy_tb was not forced. */
2886 * Avi is a special case here because it supports variable fps but
2887 * having the fps and timebase differe significantly adds quite some
2890 if(!strcmp(oc->oformat->name, "avi")) {
2891 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2892 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2893 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2894 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2896 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2897 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2898 enc_ctx->ticks_per_frame = 2;
2899 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2900 && av_q2d(ist->st->time_base) < 1.0/500
2902 enc_ctx->time_base = dec_ctx->time_base;
2903 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2904 enc_ctx->time_base.den *= 2;
2905 enc_ctx->ticks_per_frame = 2;
/* Non-VFR containers (excluding the mov/mp4 family which keeps its own
 * timebase) get the decoder timebase when the stream one is too fine. */
2907 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2908 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2909 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2910 && strcmp(oc->oformat->name, "f4v")
2912 if( copy_tb<0 && dec_ctx->time_base.den
2913 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2914 && av_q2d(ist->st->time_base) < 1.0/500
2916 enc_ctx->time_base = dec_ctx->time_base;
2917 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks: keep the decoder timebase when it encodes a
 * plausible frame rate (<= 121 fps). */
2920 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2921 && dec_ctx->time_base.num < dec_ctx->time_base.den
2922 && dec_ctx->time_base.num > 0
2923 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2924 enc_ctx->time_base = dec_ctx->time_base;
/* An explicit -r on input/output overrides the derived timebase. */
2927 if (!ost->frame_rate.num)
2928 ost->frame_rate = ist->framerate;
2929 if(ost->frame_rate.num)
2930 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2932 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2933 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy per-stream packet side data, skipping the display matrix when
 * the user overrode rotation. */
2935 if (ist->st->nb_side_data) {
2936 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2937 sizeof(*ist->st->side_data));
2938 if (!ost->st->side_data)
2939 return AVERROR(ENOMEM);
2941 ost->st->nb_side_data = 0;
2942 for (j = 0; j < ist->st->nb_side_data; j++) {
2943 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2944 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2946 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2949 sd_dst->data = av_malloc(sd_src->size);
2951 return AVERROR(ENOMEM);
2952 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2953 sd_dst->size = sd_src->size;
2954 sd_dst->type = sd_src->type;
2955 ost->st->nb_side_data++;
/* A parser is still needed for copied streams (frame boundaries etc.). */
2959 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copy for stream copy. */
2961 switch (enc_ctx->codec_type) {
2962 case AVMEDIA_TYPE_AUDIO:
2963 if (audio_volume != 256) {
2964 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2967 enc_ctx->channel_layout = dec_ctx->channel_layout;
2968 enc_ctx->sample_rate = dec_ctx->sample_rate;
2969 enc_ctx->channels = dec_ctx->channels;
2970 enc_ctx->frame_size = dec_ctx->frame_size;
2971 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2972 enc_ctx->block_align = dec_ctx->block_align;
2973 enc_ctx->initial_padding = dec_ctx->delay;
2974 #if FF_API_AUDIOENC_DELAY
2975 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3 block_align values here are bogus for muxing; clear them. */
2977 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2978 enc_ctx->block_align= 0;
2979 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2980 enc_ctx->block_align= 0;
2982 case AVMEDIA_TYPE_VIDEO:
2983 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2984 enc_ctx->width = dec_ctx->width;
2985 enc_ctx->height = dec_ctx->height;
2986 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
/* SAR priority: -aspect override > container SAR > codec SAR. */
2987 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2989 av_mul_q(ost->frame_aspect_ratio,
2990 (AVRational){ enc_ctx->height, enc_ctx->width });
2991 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2992 "with stream copy may produce invalid files\n");
2994 else if (ist->st->sample_aspect_ratio.num)
2995 sar = ist->st->sample_aspect_ratio;
2997 sar = dec_ctx->sample_aspect_ratio;
2998 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2999 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3000 ost->st->r_frame_rate = ist->st->r_frame_rate;
3002 case AVMEDIA_TYPE_SUBTITLE:
3003 enc_ctx->width = dec_ctx->width;
3004 enc_ctx->height = dec_ctx->height;
3006 case AVMEDIA_TYPE_UNKNOWN:
3007 case AVMEDIA_TYPE_DATA:
3008 case AVMEDIA_TYPE_ATTACHMENT:
/* ---------- encoding path (else branch in the original) ---------- */
3015 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3017 /* should only happen when a default codec is not present. */
3018 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3019 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3020 ret = AVERROR(EINVAL);
3024 set_encoder_id(output_files[ost->file_index], ost);
/* QSV has its own zero-copy init path; skip the generic setup if used. */
3027 if (qsv_transcode_init(ost))
/* Every encoded audio/video stream gets at least a simple
 * input->output filtergraph. */
3032 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3033 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3035 fg = init_simple_filtergraph(ist, ost);
3036 if (configure_filtergraph(fg)) {
3037 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick an output frame rate: filter graph > input -r > input
 * r_frame_rate > hard 25 fps fallback with a warning. */
3042 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3043 if (!ost->frame_rate.num)
3044 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3045 if (ist && !ost->frame_rate.num)
3046 ost->frame_rate = ist->framerate;
3047 if (ist && !ost->frame_rate.num)
3048 ost->frame_rate = ist->st->r_frame_rate;
3049 if (ist && !ost->frame_rate.num) {
3050 ost->frame_rate = (AVRational){25, 1};
3051 av_log(NULL, AV_LOG_WARNING,
3053 "about the input framerate is available. Falling "
3054 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3055 "if you want a different framerate.\n",
3056 ost->file_index, ost->index);
3058 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports (unless -force_fps). */
3059 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3060 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3061 ost->frame_rate = ost->enc->supported_framerates[idx];
3063 // reduce frame rate for mpeg4 to be within the spec limits
3064 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3065 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3066 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Per-media-type encoder parameters, taken from the filtergraph sink. */
3070 switch (enc_ctx->codec_type) {
3071 case AVMEDIA_TYPE_AUDIO:
3072 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3073 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3074 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3075 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3076 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3078 case AVMEDIA_TYPE_VIDEO:
3079 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3080 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3081 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3082 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3083 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3084 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3085 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale forced keyframe times into the final encoder timebase. */
3087 for (j = 0; j < ost->forced_kf_count; j++)
3088 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3090 enc_ctx->time_base);
3092 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3093 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3094 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3095 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3096 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3097 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Compatibility warnings when the auto-chosen pixel format is not
 * yuv420p for libx264 / mpeg2video. */
3098 if (!strncmp(ost->enc->name, "libx264", 7) &&
3099 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3100 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3101 av_log(NULL, AV_LOG_WARNING,
3102 "No pixel format specified, %s for H.264 encoding chosen.\n"
3103 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3104 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3105 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3106 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3107 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3108 av_log(NULL, AV_LOG_WARNING,
3109 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3110 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3111 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3112 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3114 ost->st->avg_frame_rate = ost->frame_rate;
/* Geometry/format change versus the input resets bits_per_raw_sample
 * to the user-requested value (guard condition partly elided). */
3117 enc_ctx->width != dec_ctx->width ||
3118 enc_ctx->height != dec_ctx->height ||
3119 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3120 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: expression form, "source" passthrough, or a
 * static list of timestamps. */
3123 if (ost->forced_keyframes) {
3124 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3125 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3126 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3128 av_log(NULL, AV_LOG_ERROR,
3129 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3132 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3133 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3137 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3138 // parse it only for static kf timings
3139 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3140 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3144 case AVMEDIA_TYPE_SUBTITLE:
3145 enc_ctx->time_base = (AVRational){1, 1000};
3146 if (!enc_ctx->width) {
3147 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3148 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3151 case AVMEDIA_TYPE_DATA:
/* Parse the -disposition string into AV_DISPOSITION_* flag bits via a
 * throwaway AVOption flags table. */
3159 if (ost->disposition) {
3160 static const AVOption opts[] = {
3161 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3162 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3163 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3164 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3165 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3166 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3167 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3168 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3169 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3170 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3171 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3172 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3173 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3174 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3177 static const AVClass class = {
3179 .item_name = av_default_item_name,
3181 .version = LIBAVUTIL_VERSION_INT,
3183 const AVClass *pclass = &class;
3185 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3191 /* open each encoder */
3192 for (i = 0; i < nb_output_streams; i++) {
3193 ret = init_output_stream(output_streams[i], error, sizeof(error));
3198 /* init input streams */
3199 for (i = 0; i < nb_input_streams; i++)
3200 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On decoder-open failure, close the already-opened encoders. */
3201 for (i = 0; i < nb_output_streams; i++) {
3202 ost = output_streams[i];
3203 avcodec_close(ost->enc_ctx);
3208 /* discard unused programs */
3209 for (i = 0; i < nb_input_files; i++) {
3210 InputFile *ifile = input_files[i];
3211 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3212 AVProgram *p = ifile->ctx->programs[j];
3213 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is still in use. */
3215 for (k = 0; k < p->nb_stream_indexes; k++)
3216 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3217 discard = AVDISCARD_DEFAULT;
3220 p->discard = discard;
3224 /* open files and write file headers */
3225 for (i = 0; i < nb_output_files; i++) {
3226 oc = output_files[i]->ctx;
3227 oc->interrupt_callback = int_cb;
3228 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3229 snprintf(error, sizeof(error),
3230 "Could not write header for output file #%d "
3231 "(incorrect codec parameters ?): %s",
3232 i, av_err2str(ret));
3233 ret = AVERROR(EINVAL);
3236 // assert_avoptions(output_files[i]->opts);
/* Non-RTP outputs clear want_sdp (the assignment is elided here). */
3237 if (strcmp(oc->oformat->name, "rtp")) {
3243 /* dump the file output parameters - cannot be done before in case
3245 for (i = 0; i < nb_output_files; i++) {
3246 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3249 /* dump the stream mapping */
3250 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3251 for (i = 0; i < nb_input_streams; i++) {
3252 ist = input_streams[i];
3254 for (j = 0; j < ist->nb_filters; j++) {
3255 if (ist->filters[j]->graph->graph_desc) {
3256 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3257 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3258 ist->filters[j]->name);
3259 if (nb_filtergraphs > 1)
3260 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3261 av_log(NULL, AV_LOG_INFO, "\n");
3266 for (i = 0; i < nb_output_streams; i++) {
3267 ost = output_streams[i];
3269 if (ost->attachment_filename) {
3270 /* an attached file */
3271 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3272 ost->attachment_filename, ost->file_index, ost->index);
3276 if (ost->filter && ost->filter->graph->graph_desc) {
3277 /* output from a complex graph */
3278 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3279 if (nb_filtergraphs > 1)
3280 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3282 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3283 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping line, with sync stream and codec names. */
3287 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3288 input_streams[ost->source_index]->file_index,
3289 input_streams[ost->source_index]->st->index,
3292 if (ost->sync_ist != input_streams[ost->source_index])
3293 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3294 ost->sync_ist->file_index,
3295 ost->sync_ist->st->index);
3296 if (ost->stream_copy)
3297 av_log(NULL, AV_LOG_INFO, " (copy)");
3299 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3300 const AVCodec *out_codec = ost->enc;
3301 const char *decoder_name = "?";
3302 const char *in_codec_name = "?";
3303 const char *encoder_name = "?";
3304 const char *out_codec_name = "?";
3305 const AVCodecDescriptor *desc;
/* Report "native" when the implementation name equals the codec name. */
3308 decoder_name = in_codec->name;
3309 desc = avcodec_descriptor_get(in_codec->id);
3311 in_codec_name = desc->name;
3312 if (!strcmp(decoder_name, in_codec_name))
3313 decoder_name = "native";
3317 encoder_name = out_codec->name;
3318 desc = avcodec_descriptor_get(out_codec->id);
3320 out_codec_name = desc->name;
3321 if (!strcmp(encoder_name, out_codec_name))
3322 encoder_name = "native";
3325 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3326 in_codec_name, decoder_name,
3327 out_codec_name, encoder_name);
3329 av_log(NULL, AV_LOG_INFO, "\n");
/* Error label (elided): log the accumulated message and return ret. */
3333 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3337 if (sdp_filename || want_sdp) {
3341 transcode_init_done = 1;
3346 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3347 static int need_output(void)
3351 for (i = 0; i < nb_output_streams; i++) {
3352 OutputStream *ost = output_streams[i];
3353 OutputFile *of = output_files[ost->file_index];
3354 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are done or whose file hit -fs (limit_filesize). */
3356 if (ost->finished ||
3357 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Hitting -frames closes every stream of the same output file. */
3359 if (ost->frame_number >= ost->max_frames) {
3361 for (j = 0; j < of->ctx->nb_streams; j++)
3362 close_output_stream(output_streams[of->ost_index + j]);
3373 * Select the output stream to process.
3375 * @return selected output stream, or NULL if none available
3377 static OutputStream *choose_output(void)
3380 int64_t opts_min = INT64_MAX;
3381 OutputStream *ost_min = NULL;
/* Pick the unfinished stream with the smallest current DTS (rescaled to
 * a common timebase) so muxing stays roughly interleaved; a stream with
 * no DTS yet sorts first (INT64_MIN). */
3383 for (i = 0; i < nb_output_streams; i++) {
3384 OutputStream *ost = output_streams[i];
3385 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3386 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3388 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3389 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* A temporarily unavailable stream yields NULL so the caller waits. */
3391 if (!ost->finished && opts < opts_min) {
3393 ost_min = ost->unavailable ? NULL : ost;
/* Poll the terminal for interactive single-key commands ('q', '+', '-',
 * 'c'/'C', 'd'/'D', 'h', 's', '?').  Returns AVERROR_EXIT to request
 * shutdown, 0 otherwise. */
3399 static int check_keyboard_interaction(int64_t cur_time)
3402 static int64_t last_time;
3403 if (received_nb_signals)
3404 return AVERROR_EXIT;
3405 /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100ms; never read the tty as a daemon. */
3406 if(cur_time - last_time >= 100000 && !run_as_daemon){
3408 last_time = cur_time;
3412 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity in steps of one level (10). */
3413 if (key == '+') av_log_set_level(av_log_get_level()+10);
3414 if (key == '-') av_log_set_level(av_log_get_level()-10);
3415 if (key == 's') qp_hist ^= 1;
3418 do_hex_dump = do_pkt_dump = 0;
3419 } else if(do_pkt_dump){
3423 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' queues/sends it
 * to all matching filters.  Input format: <target> <time> <command> [<arg>]. */
3425 if (key == 'c' || key == 'C'){
3426 char buf[4096], target[64], command[256], arg[256] = {0};
3429 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3431 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3436 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3437 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3438 target, time, command, arg);
3439 for (i = 0; i < nb_filtergraphs; i++) {
3440 FilterGraph *fg = filtergraphs[i];
3443 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3444 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3445 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3446 } else if (key == 'c') {
3447 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3448 ret = AVERROR_PATCHWELCOME;
/* time != -1: queue the command for later execution at that timestamp. */
3450 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3452 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3457 av_log(NULL, AV_LOG_ERROR,
3458 "Parse error, at least 3 arguments were expected, "
3459 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles the codec debug flags; 'd' prompts for an explicit value. */
3462 if (key == 'd' || key == 'D'){
3465 debug = input_streams[0]->st->codec->debug<<1;
3466 if(!debug) debug = 1;
3467 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3473 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3477 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3478 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug value to every decoder and encoder context. */
3480 for(i=0;i<nb_input_streams;i++) {
3481 input_streams[i]->st->codec->debug = debug;
3483 for(i=0;i<nb_output_streams;i++) {
3484 OutputStream *ost = output_streams[i];
3485 ost->enc_ctx->debug = debug;
3487 if(debug) av_log_set_level(AV_LOG_DEBUG);
3488 fprintf(stderr,"debug=%d\n", debug);
3491 fprintf(stderr, "key function\n"
3492 "? show this help\n"
3493 "+ increase verbosity\n"
3494 "- decrease verbosity\n"
3495 "c Send command to first matching filter supporting it\n"
3496 "C Send/Que command to all matching filters\n"
3497 "D cycle through available debug modes\n"
3498 "h dump packets/hex press to cycle through the 3 states\n"
3500 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * @param arg  the InputFile this thread demuxes (cast from void *) */
3507 static void *input_thread(void *arg)
3510 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3515 ret = av_read_frame(f->ctx, &pkt);
3517 if (ret == AVERROR(EAGAIN)) {
/* Fatal read error: propagate it to the receiving side and stop. */
3522 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3525 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send failed because the queue is full: warn once, then
 * retry in blocking mode so the packet is not lost. */
3526 if (flags && ret == AVERROR(EAGAIN)) {
3528 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3529 av_log(f->ctx, AV_LOG_WARNING,
3530 "Thread message queue blocking; consider raising the "
3531 "thread_queue_size option (current value: %d)\n",
3532 f->thread_queue_size);
3535 if (ret != AVERROR_EOF)
3536 av_log(f->ctx, AV_LOG_ERROR,
3537 "Unable to send packet to main thread: %s\n",
/* Send failed: the queued packet will never be consumed, free it here. */
3539 av_packet_unref(&pkt);
3540 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads and free their message queues.
 * Safe to call when threads were never started (queue pointer is NULL). */
3548 static void free_input_threads(void)
3552 for (i = 0; i < nb_input_files; i++) {
3553 InputFile *f = input_files[i];
3556 if (!f || !f->in_thread_queue)
/* Tell the producer to stop, then drain any packets still queued so the
 * thread is not blocked on a full queue when we join it. */
3558 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3559 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3560 av_packet_unref(&pkt);
3562 pthread_join(f->thread, NULL);
3564 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread per input file (only when there is more than
 * one input).  Returns 0 on success or a negative AVERROR code. */
3568 static int init_input_threads(void)
/* A single input never blocks another one, so no threads are needed. */
3572 if (nb_input_files == 1)
3575 for (i = 0; i < nb_input_files; i++) {
3576 InputFile *f = input_files[i];
/* Non-seekable inputs (pipes, network, devices — but not lavfi) are read
 * in non-blocking mode so one stalled input cannot block the others. */
3578 if (f->ctx->pb ? !f->ctx->pb->seekable :
3579 strcmp(f->ctx->iformat->name, "lavfi"))
3580 f->non_blocking = 1;
3581 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3582 f->thread_queue_size, sizeof(AVPacket));
3586 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3587 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
/* pthread_create returns a positive errno value, not -1/errno. */
3588 av_thread_message_queue_free(&f->in_thread_queue);
3589 return AVERROR(ret);
/* Multi-threaded variant of get_input_packet(): receive one packet from
 * this input file's demuxer-thread queue (non-blocking if so configured). */
3595 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3597 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3599 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from an input file, honouring -re style rate
 * limiting, and dispatching to the threaded reader when multiple inputs
 * are open.  Returns 0, AVERROR(EAGAIN) when rate-limited, or an error. */
3603 static int get_input_packet(InputFile *f, AVPacket *pkt)
3607 for (i = 0; i < f->nb_streams; i++) {
3608 InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream's decoded position (scaled to microseconds) with
 * wall-clock time elapsed; reading ahead of realtime yields EAGAIN. */
3609 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3610 int64_t now = av_gettime_relative() - ist->start;
3612 return AVERROR(EAGAIN);
3617 if (nb_input_files > 1)
3618 return get_input_packet_mt(f, pkt);
3620 return av_read_frame(f->ctx, pkt);
/* Return whether any output stream is currently marked unavailable
 * (i.e. its input reported EAGAIN this iteration). */
3623 static int got_eagain(void)
3626 for (i = 0; i < nb_output_streams; i++)
3627 if (output_streams[i]->unavailable)
/* Clear the per-file EAGAIN flags and per-stream unavailability markers
 * before the next round of the transcode loop. */
3632 static void reset_eagain(void)
3635 for (i = 0; i < nb_input_files; i++)
3636 input_files[i]->eagain = 0;
3637 for (i = 0; i < nb_output_streams; i++)
3638 output_streams[i]->unavailable = 0;
3641 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Helper for -stream_loop: keeps *duration at the larger of the current
 * value and tmp, comparing across differing time bases via av_compare_ts(). */
3642 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3643 AVRational time_base)
3649 return tmp_time_base;
3652 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3655 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: flush the decoders
 * and accumulate the file's total duration (including the last frame)
 * into ifile->duration so looped timestamps keep increasing. */
3661 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3664 AVCodecContext *avctx;
3665 int i, ret, has_audio = 0;
3666 int64_t duration = 0;
3668 ret = av_seek_frame(is, -1, is->start_time, 0);
3672 for (i = 0; i < ifile->nb_streams; i++) {
3673 ist = input_streams[ifile->ist_index + i];
3674 avctx = ist->dec_ctx;
/* Drain and reset each active decoder before replaying from the start. */
3677 if (ist->decoding_needed) {
3678 process_input_packet(ist, NULL, 1);
3679 avcodec_flush_buffers(avctx);
3682 /* duration is the length of the last frame in a stream
3683 * when audio stream is present we don't care about
3684 * last video frame length because it's not defined exactly */
3685 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3689 for (i = 0; i < ifile->nb_streams; i++) {
3690 ist = input_streams[ifile->ist_index + i];
3691 avctx = ist->dec_ctx;
/* Audio: last-frame length from the sample count at the stream's rate. */
3694 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3695 AVRational sample_rate = {1, avctx->sample_rate};
3697 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: one frame at the configured or average frame rate; fall back
 * to a single time-base tick when neither rate is known. */
3701 if (ist->framerate.num) {
3702 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3703 } else if (ist->st->avg_frame_rate.num) {
3704 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3705 } else duration = 1;
3707 if (!ifile->duration)
3708 ifile->time_base = ist->st->time_base;
3709 /* the total duration of the stream, max_pts - min_pts is
3710 * the duration of the stream without the last frame */
3711 duration += ist->max_pts - ist->min_pts;
3712 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* loop > 0 means a finite loop count; decrement it per rewind. */
3716 if (ifile->loop > 0)
3724 * - 0 -- one packet was read and processed
3725 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3726 * this function should be called again
3727 * - AVERROR_EOF -- this function should not be called again
3729 static int process_input(int file_index)
3731 InputFile *ifile = input_files[file_index];
3732 AVFormatContext *is;
3739 ret = get_input_packet(ifile, &pkt);
3741 if (ret == AVERROR(EAGAIN)) {
/* End of input but -stream_loop is active: rewind and read again. */
3745 if (ret < 0 && ifile->loop) {
3746 if ((ret = seek_to_start(ifile, is)) < 0)
3748 ret = get_input_packet(ifile, &pkt);
3751 if (ret != AVERROR_EOF) {
3752 print_error(is->filename, ret);
/* Genuine EOF: flush the decoders of this file's streams. */
3757 for (i = 0; i < ifile->nb_streams; i++) {
3758 ist = input_streams[ifile->ist_index + i];
3759 if (ist->decoding_needed) {
3760 ret = process_input_packet(ist, NULL, 0);
3765 /* mark all outputs that don't go through lavfi as finished */
3766 for (j = 0; j < nb_output_streams; j++) {
3767 OutputStream *ost = output_streams[j];
3769 if (ost->source_index == ifile->ist_index + i &&
3770 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3771 finish_output_stream(ost);
/* Report EOF as EAGAIN so the caller tries the remaining inputs. */
3775 ifile->eof_reached = 1;
3776 return AVERROR(EAGAIN);
3782 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3783 is->streams[pkt.stream_index]);
3785 /* the following test is needed in case new streams appear
3786 dynamically in stream : we ignore them */
3787 if (pkt.stream_index >= ifile->nb_streams) {
3788 report_new_stream(file_index, &pkt);
3789 goto discard_packet;
3792 ist = input_streams[ifile->ist_index + pkt.stream_index];
3794 ist->data_size += pkt.size;
3798 goto discard_packet;
3800 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3801 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3806 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3807 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3808 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3809 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3810 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3811 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3812 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3813 av_ts2str(input_files[ist->file_index]->ts_offset),
3814 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for containers with < 64-bit pts fields. */
3817 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3818 int64_t stime, stime2;
3819 // Correcting starttime based on the enabled streams
3820 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3821 // so we instead do it here as part of discontinuity handling
3822 if ( ist->next_dts == AV_NOPTS_VALUE
3823 && ifile->ts_offset == -is->start_time
3824 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3825 int64_t new_start_time = INT64_MAX;
3826 for (i=0; i<is->nb_streams; i++) {
3827 AVStream *st = is->streams[i];
3828 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3830 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3832 if (new_start_time > is->start_time) {
3833 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3834 ifile->ts_offset = -new_start_time;
3838 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3839 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3840 ist->wrap_correction_done = 1;
/* Unwrap dts/pts that have wrapped past the pts_wrap_bits boundary;
 * leave the flag unset until both fields are confirmed wrapped. */
3842 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3843 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3844 ist->wrap_correction_done = 0;
3846 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3847 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3848 ist->wrap_correction_done = 0;
3852 /* add the stream-global side data to the first packet */
3853 if (ist->nb_packets == 1) {
3854 if (ist->st->nb_side_data)
3855 av_packet_split_side_data(&pkt);
3856 for (i = 0; i < ist->st->nb_side_data; i++) {
3857 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries, and skip the
 * display matrix when autorotation handles it instead. */
3860 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3862 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3865 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3869 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's timestamp offset, then the -itsscale factor. */
3873 if (pkt.dts != AV_NOPTS_VALUE)
3874 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3875 if (pkt.pts != AV_NOPTS_VALUE)
3876 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3878 if (pkt.pts != AV_NOPTS_VALUE)
3879 pkt.pts *= ist->ts_scale;
3880 if (pkt.dts != AV_NOPTS_VALUE)
3881 pkt.dts *= ist->ts_scale;
/* First packet of an A/V stream after another file's last timestamp:
 * detect an inter-file discontinuity and fold it into ts_offset. */
3883 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3884 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3885 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3886 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3887 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3888 int64_t delta = pkt_dts - ifile->last_ts;
3889 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3890 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3891 ifile->ts_offset -= delta;
3892 av_log(NULL, AV_LOG_DEBUG,
3893 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3894 delta, ifile->ts_offset);
3895 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3896 if (pkt.pts != AV_NOPTS_VALUE)
3897 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping, shift timestamps by the accumulated file duration and
 * track the observed pts range for the next loop's duration estimate. */
3901 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3902 if (pkt.pts != AV_NOPTS_VALUE) {
3903 pkt.pts += duration;
3904 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3905 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3908 if (pkt.dts != AV_NOPTS_VALUE)
3909 pkt.dts += duration;
/* Intra-stream discontinuity handling against the predicted next_dts. */
3911 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3912 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3913 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3915 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3916 int64_t delta = pkt_dts - ist->next_dts;
3917 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3918 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3919 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3920 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3921 ifile->ts_offset -= delta;
3922 av_log(NULL, AV_LOG_DEBUG,
3923 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3924 delta, ifile->ts_offset);
3925 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3926 if (pkt.pts != AV_NOPTS_VALUE)
3927 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Formats without AVFMT_TS_DISCONT: drop timestamps that deviate past
 * the dts_error_threshold instead of shifting the whole offset. */
3930 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3931 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3932 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3933 pkt.dts = AV_NOPTS_VALUE;
3935 if (pkt.pts != AV_NOPTS_VALUE){
3936 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3937 delta = pkt_pts - ist->next_dts;
3938 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3939 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3940 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3941 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for inter-file discontinuity checks. */
3947 if (pkt.dts != AV_NOPTS_VALUE)
3948 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3951 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3952 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3953 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3954 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3955 av_ts2str(input_files[ist->file_index]->ts_offset),
3956 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3959 sub2video_heartbeat(ist, pkt.pts);
3961 process_input_packet(ist, &pkt, 0);
3964 av_packet_unref(&pkt);
3970 * Perform a step of transcoding for the specified filter graph.
3972 * @param[in] graph filter graph to consider
3973 * @param[out] best_ist input stream where a frame would allow to continue
3974 * @return 0 for success, <0 for error
3976 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3979 int nb_requests, nb_requests_max = 0;
3980 InputFilter *ifilter;
/* Ask the graph for its oldest queued frame; 0 means a frame is ready
 * and the sinks can be reaped immediately. */
3984 ret = avfilter_graph_request_oldest(graph->graph);
3986 return reap_filters(0);
/* EOF from the graph: flush the sinks and close all its outputs. */
3988 if (ret == AVERROR_EOF) {
3989 ret = reap_filters(1);
3990 for (i = 0; i < graph->nb_outputs; i++)
3991 close_output_stream(graph->outputs[i]->ost);
3994 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the input whose buffersrc has starved the most requests;
 * feeding it is most likely to let the graph make progress. */
3997 for (i = 0; i < graph->nb_inputs; i++) {
3998 ifilter = graph->inputs[i];
4000 if (input_files[ist->file_index]->eagain ||
4001 input_files[ist->file_index]->eof_reached)
4003 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4004 if (nb_requests > nb_requests_max) {
4005 nb_requests_max = nb_requests;
/* No feedable input found: mark every output of this graph unavailable. */
4011 for (i = 0; i < graph->nb_outputs; i++)
4012 graph->outputs[i]->ost->unavailable = 1;
4018 * Run a single step of transcoding.
4020 * @return 0 for success, <0 for error
4022 static int transcode_step(void)
4028 ost = choose_output();
4035 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered stream: let the filter graph pick which input to feed. */
4040 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Stream-copy / direct stream: its source input stream is fixed. */
4045 av_assert0(ost->source_index >= 0);
4046 ist = input_streams[ost->source_index];
4049 ret = process_input(ist->file_index);
4050 if (ret == AVERROR(EAGAIN)) {
4051 if (input_files[ist->file_index]->eagain)
4052 ost->unavailable = 1;
/* EOF from all inputs is a normal end of transcoding, not an error. */
4057 return ret == AVERROR_EOF ? 0 : ret;
4059 return reap_filters(0);
4063 * The following code is the main loop of the file converter
4065 static int transcode(void)
4068 AVFormatContext *os;
4071 int64_t timer_start;
4072 int64_t total_packets_written = 0;
4074 ret = transcode_init();
4078 if (stdin_interaction) {
4079 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4082 timer_start = av_gettime_relative();
4085 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until interrupted,
 * until no output is needed, or until EOF/error. */
4089 while (!received_sigterm) {
4090 int64_t cur_time= av_gettime_relative();
4092 /* if 'q' pressed, exits */
4093 if (stdin_interaction)
4094 if (check_keyboard_interaction(cur_time) < 0)
4097 /* check if there's any stream where output is still needed */
4098 if (!need_output()) {
4099 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4103 ret = transcode_step();
4105 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4109 av_strerror(ret, errbuf, sizeof(errbuf));
4111 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4116 /* dump report by using the output first video and audio streams */
4117 print_report(0, timer_start, cur_time);
4120 free_input_threads();
4123 /* at the end of stream, we must flush the decoder buffers */
4124 for (i = 0; i < nb_input_streams; i++) {
4125 ist = input_streams[i];
4126 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4127 process_input_packet(ist, NULL, 0);
4134 /* write the trailer if needed and close file */
4135 for (i = 0; i < nb_output_files; i++) {
4136 os = output_files[i]->ctx;
4137 if ((ret = av_write_trailer(os)) < 0) {
4138 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4144 /* dump report by using the first video and audio streams */
4145 print_report(1, timer_start, av_gettime_relative());
4147 /* close each encoder */
4148 for (i = 0; i < nb_output_streams; i++) {
4149 ost = output_streams[i];
4150 if (ost->encoding_needed) {
4151 av_freep(&ost->enc_ctx->stats_in);
4153 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was ever written. */
4156 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4157 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4161 /* close each decoder */
4162 for (i = 0; i < nb_input_streams; i++) {
4163 ist = input_streams[i];
4164 if (ist->decoding_needed) {
4165 avcodec_close(ist->dec_ctx);
4166 if (ist->hwaccel_uninit)
4167 ist->hwaccel_uninit(ist->dec_ctx);
4176 free_input_threads();
/* Cleanup path: release per-output-stream resources. */
4179 if (output_streams) {
4180 for (i = 0; i < nb_output_streams; i++) {
4181 ost = output_streams[i];
4184 fclose(ost->logfile);
4185 ost->logfile = NULL;
4187 av_freep(&ost->forced_kf_pts);
4188 av_freep(&ost->apad);
4189 av_freep(&ost->disposition);
4190 av_dict_free(&ost->encoder_opts);
4191 av_dict_free(&ost->sws_dict);
4192 av_dict_free(&ost->swr_opts);
4193 av_dict_free(&ost->resample_opts);
4194 av_dict_free(&ost->bsf_args);
/* Return the process's consumed user CPU time in microseconds, using
 * getrusage(), GetProcessTimes() or a wall-clock fallback depending on
 * platform availability. */
4202 static int64_t getutime(void)
4205 struct rusage rusage;
4207 getrusage(RUSAGE_SELF, &rusage);
4208 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4209 #elif HAVE_GETPROCESSTIMES
4211 FILETIME c, e, k, u;
4212 proc = GetCurrentProcess();
4213 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; combine the halves and divide to get us. */
4214 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4216 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (maximum RSS via
 * getrusage(), or peak pagefile usage on Windows). */
4220 static int64_t getmaxrss(void)
4222 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4223 struct rusage rusage;
4224 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes. */
4225 return (int64_t)rusage.ru_maxrss * 1024;
4226 #elif HAVE_GETPROCESSMEMORYINFO
4228 PROCESS_MEMORY_COUNTERS memcounters;
4229 proc = GetCurrentProcess();
4230 memcounters.cb = sizeof(memcounters);
4231 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4232 return memcounters.PeakPagefileUsage;
/* No-op log callback: installed (via -d) to silence all libav* logging. */
4238 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4242 int main(int argc, char **argv)
4247 register_exit(ffmpeg_cleanup);
4249 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4251 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4252 parse_loglevel(argc, argv, options);
4254 if(argc>1 && !strcmp(argv[1], "-d")){
4256 av_log_set_callback(log_callback_null);
4261 avcodec_register_all();
4263 avdevice_register_all();
4265 avfilter_register_all();
4267 avformat_network_init();
4269 show_banner(argc, argv, options);
4273 /* parse options and open all input/output files */
4274 ret = ffmpeg_parse_options(argc, argv);
4278 if (nb_output_files <= 0 && nb_input_files == 0) {
4280 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4284 /* file converter / grab */
4285 if (nb_output_files <= 0) {
4286 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4290 // if (nb_input_files == 0) {
4291 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4295 current_time = ti = getutime();
4296 if (transcode() < 0)
4298 ti = getutime() - ti;
4300 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4302 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4303 decode_error_stat[0], decode_error_stat[1]);
4304 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4307 exit_program(received_nb_signals ? 255 : main_return_code);
4308 return main_return_code;