2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* File-scope state shared across the transcoder: program identity, stats
 * output, frame dup/drop counters, and the global input/output stream and
 * file tables that the rest of ffmpeg.c iterates over.
 * NOTE(review): this listing elides intermediate lines. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Destination for the -vstats log; lazily opened in do_video_stats(). */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions (initializer elided here). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Counters for duplicated/dropped frames produced by the video sync code. */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (allocated in do_subtitle_out). */
135 static uint8_t *subtitle_out;
/* Global tables of all input/output streams and files, with their counts. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty is set. */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
/* sub2video hack:
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support. */
/* Prepare ist->sub2video.frame as a fully transparent RGB32 canvas sized to
 * the decoder dimensions (falling back to the stored sub2video w/h), ready to
 * receive blitted subtitle rectangles.
 * Returns 0 on success, a negative AVERROR on allocation failure
 * (NOTE(review): return statements are elided in this listing). */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before (re)allocating. */
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer. */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero fill = fully transparent canvas in RGB32. */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle `r` into an RGB32 destination canvas of
 * size w x h (dst/dst_linesize), expanding the rect's palette indices through
 * its palette. Non-bitmap rects and rects that overflow the canvas are
 * rejected with a warning (early-return lines elided in this listing). */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the rectangle must fit entirely inside the canvas. */
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
/* Row-by-row palette lookup: each source byte indexes a 32-bit RGBA entry. */
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Stamp the current sub2video frame with `pts` and push a reference to it
 * into every buffersrc filter attached to this input stream. KEEP_REF keeps
 * ownership of the frame here so it can be re-sent on later heartbeats. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame() first. */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render `sub` onto a fresh blank canvas and push it into the filter graph.
 * Computes start/end pts in the stream time base from the subtitle display
 * times; when `sub` is NULL (heartbeat/flush path — handling of that branch
 * is partially elided in this listing) the previous end_pts is reused. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* display times are in ms; rescale from AV_TIME_BASE to the stream tb. */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* NULL-subtitle path: keep timing from the previous update. */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* Blit every subtitle rectangle onto the canvas, then publish it. */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams of the same input file "alive": re-send the current
 * subtitle canvas at (roughly) the given pts so filters waiting on a subtitle
 * input do not stall while video frames accumulate. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video-enabled (continue elided). */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Refresh the canvas if the current subtitle expired or none was drawn. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* Only push when some downstream filter actually failed a frame request. */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit a final canvas update
 * if one is still pending, then send NULL (EOF) into each buffersrc. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes
 * (guard on restore_tty is elided in this listing). */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination bookkeeping shared between the signal handler and the
 * main loop; volatile because they are written from signal context.
 * NOTE(review): plain volatile ints, not atomics — matches the era of this code. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many were received.
 * After more than 3 signals, write a message with write(2) (async-signal-safe,
 * unlike printf) and hard-exit (exit call elided in this listing). */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
/* Close/logoff/shutdown behave like SIGTERM, then we stall below. */
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): fragment of term_init() — the function header is not visible
 * in this listing. Puts the tty into raw-ish mode for single-key reads and
 * installs signal handlers for graceful shutdown. */
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU time limit exceeded — treated like a termination request. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Returns the key pressed, or a sentinel when no input is available
 * (return paths elided in this listing). POSIX path uses select(); Windows
 * path uses PeekNamedPipe when stdin is a pipe rather than a console. */
401 static int read_key(void)
/* Poll fd 0 with the (elided) zero timeout so the call never blocks. */
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes — use that to detect pipe vs console. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run on exit: frees filtergraphs, output files/streams,
 * input files/streams, the vstats file, and the global tables, then logs the
 * reason for exiting. `ret` is the process exit status being propagated.
 * NOTE(review): many guard/brace lines are elided in this listing. */
459 static void ffmpeg_cleanup(int ret)
/* Optional benchmark report of peak resident set size. */
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free the graph, then each input/output pad and name. --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- output files: close muxer I/O (unless AVFMT_NOFILE) and free contexts. --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, cached frames, parser, etc. --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
/* Walk and close the linked list of bitstream filters. */
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 avcodec_free_context(&ost->enc_ctx);
532 av_freep(&output_streams[i]);
/* Stop reader threads before tearing down the inputs they feed. */
535 free_input_threads();
537 for (i = 0; i < nb_input_files; i++) {
538 avformat_close_input(&input_files[i]->ctx);
539 av_freep(&input_files[i]);
541 for (i = 0; i < nb_input_streams; i++) {
542 InputStream *ist = input_streams[i];
544 av_frame_free(&ist->decoded_frame);
545 av_frame_free(&ist->filter_frame);
546 av_dict_free(&ist->decoder_opts);
547 avsubtitle_free(&ist->prev_sub.subtitle);
548 av_frame_free(&ist->sub2video.frame);
549 av_freep(&ist->filters);
550 av_freep(&ist->hwaccel_device);
552 avcodec_free_context(&ist->dec_ctx);
554 av_freep(&input_streams[i]);
/* fclose() return is checked: a failed close can lose buffered stats. */
558 if (fclose(vstats_file))
559 av_log(NULL, AV_LOG_ERROR,
560 "Error closing vstats file, loss of information possible: %s\n",
561 av_err2str(AVERROR(errno)));
563 av_freep(&vstats_filename);
565 av_freep(&input_streams);
566 av_freep(&input_files);
567 av_freep(&output_streams);
568 av_freep(&output_files);
572 avformat_network_deinit();
/* Explain why we are exiting: signal, or a failure after init completed. */
574 if (received_sigterm) {
575 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
576 (int) received_sigterm);
577 } else if (ret && transcode_init_done) {
578 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
584 void remove_avoptions(AVDictionary **a, AVDictionary *b)
586 AVDictionaryEntry *t = NULL;
588 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
589 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort if any option in `m` was left unconsumed: the first remaining entry
 * is reported as an unknown option (the exit call is elided in this listing). */
593 void assert_avoptions(AVDictionary *m)
595 AVDictionaryEntry *t;
/* Empty prefix + IGNORE_SUFFIX matches any entry at all. */
596 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
597 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
602 static void abort_codec_experimental(AVCodec *c, int encoder)
/* When -benchmark_all is enabled, log the CPU time elapsed since the last
 * call next to a printf-style label, then reset the reference time (reset
 * line elided in this listing). Called with fmt == NULL to only (re)arm. */
607 static void update_benchmark(const char *fmt, ...)
609 if (do_benchmark_all) {
610 int64_t t = getutime();
612 vsnprintf(buf, sizeof(buf), fmt, va);
618 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
624 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 for (i = 0; i < nb_output_streams; i++) {
628 OutputStream *ost2 = output_streams[i];
629 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: copies encoder extradata to the muxer
 * context on first use, enforces -frames limits, applies bitstream filters,
 * fixes up non-monotonic/invalid DTS, updates stats, and hands the packet to
 * av_interleaved_write_frame(). Consumes `pkt` in all paths.
 * NOTE(review): several guard/brace lines are elided in this listing. */
633 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
635 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Stream-copy uses the muxer-side codec context; encoding uses enc_ctx. */
636 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Lazily propagate encoder extradata (e.g. SPS/PPS) to the muxer context. */
639 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
640 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
641 if (ost->st->codec->extradata) {
642 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
643 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: discard timestamps entirely. */
647 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
648 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
649 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652 /* Audio encoders may split the packets -- #frames in != #packets out.
653 * But there is no reordering, so we can limit the number of output packets
654 * by simply dropping them here.
655 * Counting encoded video frames needs to be done separately because of
656 * reordering, see do_video_out()
658 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
659 if (ost->frame_number >= ost->max_frames) {
660 av_packet_unref(pkt);
/* Pull quality/picture-type/error side data produced by the encoder. */
665 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
667 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
669 ost->quality = sd ? AV_RL32(sd) : -1;
670 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
672 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
674 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* In CFR mode the duration is dictated by the frame rate, not the encoder. */
679 if (ost->frame_rate.num && ost->is_cfr) {
680 if (pkt->duration > 0)
681 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
682 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
688 av_packet_split_side_data(pkt);
/* Run the user-requested bitstream filter chain; abort on failure. */
690 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
691 print_error("", ret);
/* --- timestamp sanitization for formats that carry timestamps --- */
696 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
697 if (pkt->dts != AV_NOPTS_VALUE &&
698 pkt->pts != AV_NOPTS_VALUE &&
699 pkt->dts > pkt->pts) {
700 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
702 ost->file_index, ost->st->index);
/* Replace dts with the median of pts, dts, last_mux_dts+1 (sum - min - max). */
704 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
705 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
706 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
709 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
710 pkt->dts != AV_NOPTS_VALUE &&
711 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict formats require strictly increasing dts; non-strict allow equal. */
712 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
713 if (pkt->dts < max) {
714 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
715 av_log(s, loglevel, "Non-monotonous DTS in output stream "
716 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
717 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
719 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
722 av_log(s, loglevel, "changing to %"PRId64". This may result "
723 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
725 if(pkt->pts >= pkt->dts)
726 pkt->pts = FFMAX(pkt->pts, max);
731 ost->last_mux_dts = pkt->dts;
733 ost->data_size += pkt->size;
734 ost->packets_written++;
736 pkt->stream_index = ost->index;
/* Optional -debug_ts style trace of what goes into the muxer. */
739 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
740 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
741 av_get_media_type_string(ost->enc_ctx->codec_type),
742 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
743 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
748 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure, finish every stream so the transcode loop terminates. */
750 print_error("av_interleaved_write_frame()", ret);
751 main_return_code = 1;
752 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
754 av_packet_unref(pkt);
/* Mark this output stream's encoder as finished; with -shortest (guard line
 * elided here, presumably of->shortest — TODO confirm) the file's recording
 * time is clamped to this stream's current end time. */
757 static void close_output_stream(OutputStream *ost)
759 OutputFile *of = output_files[ost->file_index];
761 ost->finished |= ENCODER_FINISHED;
/* End time of this stream in AV_TIME_BASE units, relative to first_pts. */
763 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
764 of->recording_time = FFMIN(of->recording_time, end);
/* Returns whether this stream may keep encoding under the file's -t limit:
 * if the stream position has reached recording_time the stream is closed.
 * (The explicit return 0/return 1 lines are elided in this listing.) */
768 static int check_recording_time(OutputStream *ost)
770 OutputFile *of = output_files[ost->file_index];
/* Compare stream position (enc time base) against the limit (AV_TIME_BASE). */
772 if (of->recording_time != INT64_MAX &&
773 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
774 AV_TIME_BASE_Q) >= 0) {
775 close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet(s): fixes up frame pts
 * from sync_opts when needed, advances sync_opts by nb_samples, calls
 * avcodec_encode_audio2(), rescales packet timestamps to the stream time
 * base, and writes via write_frame().
 * NOTE(review): some guard/brace lines are elided in this listing. */
781 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
784 AVCodecContext *enc = ost->enc_ctx;
788 av_init_packet(&pkt);
/* Stop if the -t recording limit has been reached. */
792 if (!check_recording_time(ost))
/* Without a usable pts (or with negative -async) continue from sync_opts. */
795 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
796 frame->pts = ost->sync_opts;
797 ost->sync_opts = frame->pts + frame->nb_samples;
798 ost->samples_encoded += frame->nb_samples;
799 ost->frames_encoded++;
801 av_assert0(pkt.size || !pkt.data);
802 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
804 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
805 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
806 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
807 enc->time_base.num, enc->time_base.den);
810 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
811 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
814 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Packet timestamps come out in enc->time_base; mux in st->time_base. */
817 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
820 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
821 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
822 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
823 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
826 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux it. DVB subtitles are emitted twice (one
 * packet to draw, one to clear); text/other codecs once. Handles the
 * start_display_time normalization required by some encoders and the DVB
 * pts correction. NOTE(review): loop/branch lines are elided in places. */
830 static void do_subtitle_out(AVFormatContext *s,
835 int subtitle_out_max_size = 1024 * 1024;
836 int subtitle_out_size, nb, i;
/* Subtitles cannot be scheduled without a pts. */
841 if (sub->pts == AV_NOPTS_VALUE) {
842 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the 1 MiB scratch buffer shared by all subtitle encodes. */
851 subtitle_out = av_malloc(subtitle_out_max_size);
853 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
858 /* Note: DVB subtitle need one packet to draw them and one other
859 packet to clear them */
860 /* XXX: signal it in the codec context ? */
861 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
866 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
868 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
869 pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), else 1 (assignment elided above). */
870 for (i = 0; i < nb; i++) {
871 unsigned save_num_rects = sub->num_rects;
873 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
874 if (!check_recording_time(ost))
878 // start_display_time is required to be 0
879 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
880 sub->end_display_time -= sub->start_display_time;
881 sub->start_display_time = 0;
885 ost->frames_encoded++;
887 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
888 subtitle_out_max_size, sub);
/* Restore num_rects, which the encoder call may have altered (e.g. clear pass). */
890 sub->num_rects = save_num_rects;
891 if (subtitle_out_size < 0) {
892 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Wrap the encoded payload in a packet with stream-time-base timestamps. */
896 av_init_packet(&pkt);
897 pkt.data = subtitle_out;
898 pkt.size = subtitle_out_size;
899 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
900 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
901 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
902 /* XXX: the pts correction is handled here. Maybe handling
903 it in the codec would be better */
/* 90 = 90kHz ticks per millisecond: draw packet at start, clear at end. */
905 pkt.pts += 90 * sub->start_display_time;
907 pkt.pts += 90 * sub->end_display_time;
910 write_frame(s, &pkt, ost);
/* Core video output path: decides, per input frame, how many frames to emit
 * (duplicating or dropping according to the active vsync mode), optionally
 * forces keyframes, encodes with avcodec_encode_video2(), and muxes via
 * write_frame(). Also maintains ost->last_frame for duplication.
 * NOTE(review): this listing elides many lines (declarations, braces, some
 * branches); comments below describe only what is visible. */
914 static void do_video_out(AVFormatContext *s,
916 AVFrame *next_picture,
919 int ret, format_video_sync;
921 AVCodecContext *enc = ost->enc_ctx;
922 AVCodecContext *mux_enc = ost->st->codec;
923 int nb_frames, nb0_frames, i;
924 double delta, delta0;
927 InputStream *ist = NULL;
928 AVFilterContext *filter = ost->filter->filter;
930 if (ost->source_index >= 0)
931 ist = input_streams[ost->source_index];
/* Default frame duration (in enc time-base units) from the filter frame rate. */
933 if (filter->inputs[0]->frame_rate.num > 0 &&
934 filter->inputs[0]->frame_rate.den > 0)
935 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
937 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
938 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the incoming packet duration when no filter script rewrites timing. */
940 if (!ost->filters_script &&
944 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
945 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush path: predict duplication count from the recent history (median). */
950 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
951 ost->last_nb0_frames[1],
952 ost->last_nb0_frames[2]);
954 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
955 delta = delta0 + duration;
957 /* by default, we output a single frame */
958 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete sync mode based on the muxer. */
961 format_video_sync = video_sync_method;
962 if (format_video_sync == VSYNC_AUTO) {
963 if(!strcmp(s->oformat->name, "avi")) {
964 format_video_sync = VSYNC_VFR;
966 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
968 && format_video_sync == VSYNC_CFR
969 && input_files[ist->file_index]->ctx->nb_streams == 1
970 && input_files[ist->file_index]->input_ts_offset == 0) {
971 format_video_sync = VSYNC_VSCFR;
973 if (format_video_sync == VSYNC_CFR && copy_ts) {
974 format_video_sync = VSYNC_VSCFR;
977 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift handling (frame arrived in the past) for syncing modes. */
981 format_video_sync != VSYNC_PASSTHROUGH &&
982 format_video_sync != VSYNC_DROP) {
984 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
986 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
987 sync_ipts = ost->sync_opts;
/* Per-mode decision of nb_frames/nb0_frames and sync_opts. */
992 switch (format_video_sync) {
994 if (ost->frame_number == 0 && delta0 >= 0.5) {
995 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
998 ost->sync_opts = lrint(sync_ipts);
1001 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1002 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1004 } else if (delta < -1.1)
1006 else if (delta > 1.1) {
1007 nb_frames = lrintf(delta);
1009 nb0_frames = lrintf(delta0 - 0.6);
1015 else if (delta > 0.6)
1016 ost->sync_opts = lrint(sync_ipts);
1019 case VSYNC_PASSTHROUGH:
1020 ost->sync_opts = lrint(sync_ipts);
/* Clamp to the -frames limit and record nb0 history for the flush heuristic. */
1027 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1028 nb0_frames = FFMIN(nb0_frames, nb_frames);
1030 memmove(ost->last_nb0_frames + 1,
1031 ost->last_nb0_frames,
1032 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1033 ost->last_nb0_frames[0] = nb0_frames;
1035 if (nb0_frames == 0 && ost->last_dropped) {
1037 av_log(NULL, AV_LOG_VERBOSE,
1038 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1039 ost->frame_number, ost->st->index, ost->last_frame->pts);
1041 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1042 if (nb_frames > dts_error_threshold * 30) {
1043 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1047 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1048 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1050 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1052 /* duplicates frame if needed */
1053 for (i = 0; i < nb_frames; i++) {
1054 AVFrame *in_picture;
1055 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame (duplication). */
1059 if (i < nb0_frames && ost->last_frame) {
1060 in_picture = ost->last_frame;
1062 in_picture = next_picture;
1067 in_picture->pts = ost->sync_opts;
1070 if (!check_recording_time(ost))
1072 if (ost->frame_number >= ost->max_frames)
/* Legacy raw-picture fast path: pass the AVPicture through without encoding. */
1076 #if FF_API_LAVF_FMT_RAWPICTURE
1077 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1078 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1079 /* raw pictures are written as AVPicture structure to
1080 avoid any copies. We support temporarily the older
1082 if (in_picture->interlaced_frame)
1083 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1085 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1086 pkt.data = (uint8_t *)in_picture;
1087 pkt.size = sizeof(AVPicture);
1088 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1089 pkt.flags |= AV_PKT_FLAG_KEY;
1091 write_frame(s, &pkt, ost);
1095 int got_packet, forced_keyframe = 0;
/* Honor -top on interlaced encodes. */
1098 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1099 ost->top_field_first >= 0)
1100 in_picture->top_field_first = !!ost->top_field_first;
1102 if (in_picture->interlaced_frame) {
1103 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1104 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1106 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1108 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1110 in_picture->quality = enc->global_quality;
1111 in_picture->pict_type = 0;
/* Keyframe forcing: explicit pts list, expression, or "source" key flags. */
1113 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1114 in_picture->pts * av_q2d(enc->time_base) : NAN;
1115 if (ost->forced_kf_index < ost->forced_kf_count &&
1116 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1117 ost->forced_kf_index++;
1118 forced_keyframe = 1;
1119 } else if (ost->forced_keyframes_pexpr) {
1121 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1122 res = av_expr_eval(ost->forced_keyframes_pexpr,
1123 ost->forced_keyframes_expr_const_values, NULL);
1124 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1125 ost->forced_keyframes_expr_const_values[FKF_N],
1126 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1127 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1128 ost->forced_keyframes_expr_const_values[FKF_T],
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1132 forced_keyframe = 1;
1133 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1134 ost->forced_keyframes_expr_const_values[FKF_N];
1135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1136 ost->forced_keyframes_expr_const_values[FKF_T];
1137 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1140 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1141 } else if ( ost->forced_keyframes
1142 && !strncmp(ost->forced_keyframes, "source", 6)
1143 && in_picture->key_frame==1) {
1144 forced_keyframe = 1;
1147 if (forced_keyframe) {
1148 in_picture->pict_type = AV_PICTURE_TYPE_I;
1149 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1152 update_benchmark(NULL);
1154 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1155 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1156 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1157 enc->time_base.num, enc->time_base.den);
1160 ost->frames_encoded++;
1162 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1163 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1165 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1171 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1172 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1173 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1174 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Non-delaying encoders may omit pts; fall back to our sync counter. */
1177 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1178 pkt.pts = ost->sync_opts;
1180 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1183 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1184 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1185 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1186 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1189 frame_size = pkt.size;
1190 write_frame(s, &pkt, ost);
1192 /* if two pass, output log */
1193 if (ost->logfile && enc->stats_out) {
1194 fprintf(ost->logfile, "%s", enc->stats_out);
1200 * For video, number of frames in == number of packets out.
1201 * But there may be reordering, so we can't throw away frames on encoder
1202 * flush, we need to limit them here, before they go into encoder.
1204 ost->frame_number++;
1206 if (vstats_filename && frame_size)
1207 do_video_stats(ost, frame_size);
/* Keep a reference to the just-emitted frame for future duplication. */
1210 if (!ost->last_frame)
1211 ost->last_frame = av_frame_alloc();
1212 av_frame_unref(ost->last_frame);
1213 if (next_picture && ost->last_frame)
1214 av_frame_ref(ost->last_frame, next_picture);
1216 av_frame_free(&ost->last_frame);
1219 static double psnr(double d)
1221 return -10.0 * log10(d);
/* Append one line per encoded video frame to the -vstats file: frame number,
 * quantizer, optional PSNR, frame size, running time, instantaneous and
 * average bitrate, and picture type.
 * NOTE(review): fopen error handling and some lines are elided here. */
1224 static void do_video_stats(OutputStream *ost, int frame_size)
1226 AVCodecContext *enc;
1228 double ti1, bitrate, avg_bitrate;
1230 /* this is executed just the first time do_video_stats is called */
1232 vstats_file = fopen(vstats_filename, "w");
1240 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1241 frame_number = ost->st->nb_frames;
/* quality is stored as lambda; convert back to a qp-like scale. */
1242 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1243 ost->quality / (float)FF_QP2LAMBDA);
1245 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1246 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1248 fprintf(vstats_file,"f_size= %6d ", frame_size);
1249 /* compute pts value */
1250 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes so far. */
1254 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1255 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1256 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1257 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1258 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this stream fully finished (encoder and muxer); a branch elided in
 * this listing also finishes every sibling stream of the same output file. */
1262 static void finish_output_stream(OutputStream *ost)
1264 OutputFile *of = output_files[ost->file_index];
1267 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1270 for (i = 0; i < of->ctx->nb_streams; i++)
1271 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1276 * Get and encode new output from any of the filtergraphs, without causing
1279 * @return 0 for success, <0 for severe errors
/* Pull every frame currently buffered in the output streams' buffersinks and
 * feed it to the matching video/audio encoder.  With flush != 0, EOF from a
 * video sink triggers a final do_video_out(NULL) to drain the encoder.
 * NOTE(review): many lines (loop braces, `continue`s, the float_pts
 * assignment head) were dropped from this extract. */
1281 static int reap_filters(int flush)
1283 AVFrame *filtered_frame = NULL;
1286 /* Reap all buffers present in the buffer sinks */
1287 for (i = 0; i < nb_output_streams; i++) {
1288 OutputStream *ost = output_streams[i];
1289 OutputFile *of = output_files[ost->file_index];
1290 AVFilterContext *filter;
1291 AVCodecContext *enc = ost->enc_ctx;
1296 filter = ost->filter->filter;
/* The per-stream frame is allocated lazily and reused across calls. */
1298 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1299 return AVERROR(ENOMEM);
1301 filtered_frame = ost->filtered_frame;
1304 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take what is already buffered, never drive the graph. */
1305 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1306 AV_BUFFERSINK_FLAG_NO_REQUEST);
1308 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1309 av_log(NULL, AV_LOG_WARNING,
1310 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1311 } else if (flush && ret == AVERROR_EOF) {
1312 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1313 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1317 if (ost->finished) {
1318 av_frame_unref(filtered_frame);
1321 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1322 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1323 AVRational tb = enc->time_base;
/* Scale the timebase denominator up so the rescale below keeps extra
 * fractional precision for float_pts (clipped to avoid overflow). */
1324 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1326 tb.den <<= extra_bits;
1328 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1329 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1330 float_pts /= 1 << extra_bits;
1331 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1332 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1334 filtered_frame->pts =
1335 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1336 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1338 //if (ost->source_index >= 0)
1339 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1341 switch (filter->inputs[0]->type) {
1342 case AVMEDIA_TYPE_VIDEO:
1343 if (!ost->frame_aspect_ratio.num)
1344 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1347 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1348 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1350 enc->time_base.num, enc->time_base.den);
1353 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1355 case AVMEDIA_TYPE_AUDIO:
/* Encoder cannot follow mid-stream channel-count changes: bail out. */
1356 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1357 enc->channels != av_frame_get_channels(filtered_frame)) {
1358 av_log(NULL, AV_LOG_ERROR,
1359 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1362 do_audio_out(of->ctx, ost, filtered_frame);
1365 // TODO support subtitle filters
1369 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-stream packet/frame statistics
 * for every input and output file.  Warns when nothing was encoded.
 * NOTE(review): loop braces and several closing lines were dropped in
 * this extract. */
1376 static void print_final_stats(int64_t total_size)
1378 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1379 uint64_t subtitle_size = 0;
1380 uint64_t data_size = 0;
1381 float percent = -1.0;
1385 for (i = 0; i < nb_output_streams; i++) {
1386 OutputStream *ost = output_streams[i];
1387 switch (ost->enc_ctx->codec_type) {
1388 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1389 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1390 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1391 default: other_size += ost->data_size; break;
1393 extra_size += ost->enc_ctx->extradata_size;
1394 data_size += ost->data_size;
/* Mixed old/new flag constants (CODEC_FLAG_PASS2 vs AV_CODEC_FLAG_PASS1):
 * presumably intentional during the API transition — TODO confirm. */
1395 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1396 != AV_CODEC_FLAG_PASS1)
/* Overhead = container bytes beyond the raw encoded payload. */
1400 if (data_size && total_size>0 && total_size >= data_size)
1401 percent = 100.0 * (total_size - data_size) / data_size;
1403 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1404 video_size / 1024.0,
1405 audio_size / 1024.0,
1406 subtitle_size / 1024.0,
1407 other_size / 1024.0,
1408 extra_size / 1024.0);
1410 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1412 av_log(NULL, AV_LOG_INFO, "unknown");
1413 av_log(NULL, AV_LOG_INFO, "\n");
1415 /* print verbose per-stream stats */
1416 for (i = 0; i < nb_input_files; i++) {
1417 InputFile *f = input_files[i];
/* This total_size shadows the parameter on purpose: per-file totals. */
1418 uint64_t total_packets = 0, total_size = 0;
1420 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1421 i, f->ctx->filename);
1423 for (j = 0; j < f->nb_streams; j++) {
1424 InputStream *ist = input_streams[f->ist_index + j];
1425 enum AVMediaType type = ist->dec_ctx->codec_type;
1427 total_size += ist->data_size;
1428 total_packets += ist->nb_packets;
1430 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1431 i, j, media_type_string(type));
1432 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1433 ist->nb_packets, ist->data_size);
1435 if (ist->decoding_needed) {
1436 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1437 ist->frames_decoded);
1438 if (type == AVMEDIA_TYPE_AUDIO)
1439 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1440 av_log(NULL, AV_LOG_VERBOSE, "; ");
1443 av_log(NULL, AV_LOG_VERBOSE, "\n");
1446 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1447 total_packets, total_size);
1450 for (i = 0; i < nb_output_files; i++) {
1451 OutputFile *of = output_files[i];
1452 uint64_t total_packets = 0, total_size = 0;
1454 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1455 i, of->ctx->filename);
1457 for (j = 0; j < of->ctx->nb_streams; j++) {
1458 OutputStream *ost = output_streams[of->ost_index + j];
1459 enum AVMediaType type = ost->enc_ctx->codec_type;
1461 total_size += ost->data_size;
1462 total_packets += ost->packets_written;
1464 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1465 i, j, media_type_string(type));
1466 if (ost->encoding_needed) {
1467 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1468 ost->frames_encoded);
1469 if (type == AVMEDIA_TYPE_AUDIO)
1470 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1471 av_log(NULL, AV_LOG_VERBOSE, "; ");
1474 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1475 ost->packets_written, ost->data_size);
1477 av_log(NULL, AV_LOG_VERBOSE, "\n");
1480 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1481 total_packets, total_size);
1483 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1484 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1486 av_log(NULL, AV_LOG_WARNING, "\n");
1488 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (or final) progress line: frame count, fps, quantizer,
 * PSNR, output size, time, bitrate, dup/drop counts and speed.  Also mirrors
 * every value into buf_script for the -progress AVIO sink.  Rate-limited to
 * one update per 500ms unless is_last_report is set.
 * NOTE(review): numerous lines (buf declaration, returns, braces, the
 * hours/mins breakdown) were dropped in this extract. */
1493 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1496 AVBPrint buf_script;
1498 AVFormatContext *oc;
1500 AVCodecContext *enc;
1501 int frame_number, vid, i;
1504 int64_t pts = INT64_MIN + 1;
/* static: persists across calls to implement the 500ms throttle and the
 * cumulative quantizer histogram. */
1505 static int64_t last_time = -1;
1506 static int qp_histogram[52];
1507 int hours, mins, secs, us;
1511 if (!print_stats && !is_last_report && !progress_avio)
1514 if (!is_last_report) {
1515 if (last_time == -1) {
1516 last_time = cur_time;
1519 if ((cur_time - last_time) < 500000)
1521 last_time = cur_time;
1524 t = (cur_time-timer_start) / 1000000.0;
1527 oc = output_files[0]->ctx;
1529 total_size = avio_size(oc->pb);
1530 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1531 total_size = avio_tell(oc->pb);
1535 av_bprint_init(&buf_script, 0, 1);
1536 for (i = 0; i < nb_output_streams; i++) {
1538 ost = output_streams[i];
1540 if (!ost->stream_copy)
1541 q = ost->quality / (float) FF_QP2LAMBDA;
/* Only the first video stream drives frame/fps; later ones just print q. */
1543 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1544 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1545 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1546 ost->file_index, ost->index, q);
1548 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1551 frame_number = ost->frame_number;
1552 fps = t > 1 ? frame_number / t : 0;
1553 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1554 frame_number, fps < 9.95, fps, q);
1555 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1556 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1557 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1558 ost->file_index, ost->index, q);
1560 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1564 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* Log2-compressed histogram of quantizers, one hex digit per bucket. */
1566 for (j = 0; j < 32; j++)
1567 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1570 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1572 double error, error_sum = 0;
1573 double scale, scale_sum = 0;
1575 char type[3] = { 'Y','U','V' };
1576 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1577 for (j = 0; j < 3; j++) {
1578 if (is_last_report) {
/* Final report: cumulative error over all frames; periodic: per-frame. */
1579 error = enc->error[j];
1580 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1582 error = ost->error[j];
1583 scale = enc->width * enc->height * 255.0 * 255.0;
1589 p = psnr(error / scale);
1590 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1591 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1592 ost->file_index, ost->index, type[j] | 32, p);
1594 p = psnr(error_sum / scale_sum);
1595 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1596 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1597 ost->file_index, ost->index, p);
1601 /* compute min output value */
1602 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1603 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1604 ost->st->time_base, AV_TIME_BASE_Q));
1606 nb_frames_drop += ost->last_dropped;
1609 secs = FFABS(pts) / AV_TIME_BASE;
1610 us = FFABS(pts) % AV_TIME_BASE;
1616 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1617 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1619 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1621 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1622 "size=%8.0fkB time=", total_size / 1024.0);
1624 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1625 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1626 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1627 (100 * us) / AV_TIME_BASE);
1630 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1631 av_bprintf(&buf_script, "bitrate=N/A\n");
1633 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1634 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1637 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1638 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1639 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1640 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1641 hours, mins, secs, us);
1643 if (nb_frames_dup || nb_frames_drop)
1644 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1645 nb_frames_dup, nb_frames_drop);
1646 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1647 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1650 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1651 av_bprintf(&buf_script, "speed=N/A\n");
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1654 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1657 if (print_stats || is_last_report) {
/* '\r' keeps the periodic report on one console line; '\n' finalizes it. */
1658 const char end = is_last_report ? '\n' : '\r';
1659 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1660 fprintf(stderr, "%s %c", buf, end);
1662 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1667 if (progress_avio) {
1668 av_bprintf(&buf_script, "progress=%s\n",
1669 is_last_report ? "end" : "continue");
1670 avio_write(progress_avio, buf_script.str,
1671 FFMIN(buf_script.len, buf_script.size - 1));
1672 avio_flush(progress_avio);
1673 av_bprint_finalize(&buf_script, NULL);
1674 if (is_last_report) {
1675 if ((ret = avio_closep(&progress_avio)) < 0)
1676 av_log(NULL, AV_LOG_ERROR,
1677 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1682 print_final_stats(total_size);
/* Drain every active encoder at end of stream by repeatedly calling it with
 * a NULL frame until it stops producing packets, writing each flushed packet
 * to the muxer.  Skips streamcopy streams and raw/PCM cases with nothing
 * buffered.
 * NOTE(review): the inner flush loop's braces, `continue`s and the
 * stop_encoding handling were dropped in this extract. */
1685 static void flush_encoders(void)
1689 for (i = 0; i < nb_output_streams; i++) {
1690 OutputStream *ost = output_streams[i];
1691 AVCodecContext *enc = ost->enc_ctx;
1692 AVFormatContext *os = output_files[ost->file_index]->ctx;
1693 int stop_encoding = 0;
1695 if (!ost->encoding_needed)
/* PCM-style audio encoders (frame_size <= 1) hold no delayed data. */
1698 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1700 #if FF_API_LAVF_FMT_RAWPICTURE
1701 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Dispatch on media type to the matching flush entry point. */
1706 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1709 switch (enc->codec_type) {
1710 case AVMEDIA_TYPE_AUDIO:
1711 encode = avcodec_encode_audio2;
1714 case AVMEDIA_TYPE_VIDEO:
1715 encode = avcodec_encode_video2;
1726 av_init_packet(&pkt);
1730 update_benchmark(NULL);
/* NULL frame == flush request; got_packet tells us whether to keep going. */
1731 ret = encode(enc, &pkt, NULL, &got_packet);
1732 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1734 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1739 if (ost->logfile && enc->stats_out) {
1740 fprintf(ost->logfile, "%s", enc->stats_out);
1746 if (ost->finished & MUXER_FINISHED) {
1747 av_packet_unref(&pkt);
1750 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1751 pkt_size = pkt.size;
1752 write_frame(os, &pkt, ost);
1753 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1754 do_video_stats(ost, pkt_size);
1765 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when ost is fed by ist and the packet is not before the
 * output file's start time.  NOTE(review): the early-return bodies and the
 * final `return 1;` were dropped in this extract. */
1767 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1769 OutputFile *of = output_files[ost->file_index];
1770 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1772 if (ost->source_index != ist_index)
1778 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet straight to the output (the -c copy path): rescale
 * its timestamps into the output stream's timebase, honor -ss/-t windows,
 * optionally run av_parser_change() for legacy codecs, then mux it.
 * NOTE(review): returns, braces and the opkt declaration were dropped in
 * this extract; `ost->st->codec` is the deprecated pre-codecpar API. */
1784 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1786 OutputFile *of = output_files[ost->file_index];
1787 InputFile *f = input_files [ist->file_index];
1788 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1789 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1793 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1795 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1796 !ost->copy_initial_nonkeyframes
1799 if (!ost->frame_number && !ost->copy_prior_start) {
1800 int64_t comp_start = start_time;
1801 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1802 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1803 if (pkt->pts == AV_NOPTS_VALUE ?
1804 ist->pts < comp_start :
1805 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Past the requested recording window: close the stream, write nothing. */
1809 if (of->recording_time != INT64_MAX &&
1810 ist->pts >= of->recording_time + start_time) {
1811 close_output_stream(ost);
1815 if (f->recording_time != INT64_MAX) {
1816 start_time = f->ctx->start_time;
1817 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1818 start_time += f->start_time;
1819 if (ist->pts >= f->recording_time + start_time) {
1820 close_output_stream(ost);
1825 /* force the input stream PTS */
1826 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1829 if (pkt->pts != AV_NOPTS_VALUE)
1830 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1832 opkt.pts = AV_NOPTS_VALUE;
1834 if (pkt->dts == AV_NOPTS_VALUE)
1835 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1837 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1838 opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps from the frame duration to
 * avoid rounding drift across timebase conversions. */
1840 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1841 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1843 duration = ist->dec_ctx->frame_size;
1844 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1845 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1846 ost->st->time_base) - ost_tb_start_time;
1849 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1850 opkt.flags = pkt->flags;
1851 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1852 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1853 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1854 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1855 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1857 int ret = av_parser_change(ost->parser, ost->st->codec,
1858 &opkt.data, &opkt.size,
1859 pkt->data, pkt->size,
1860 pkt->flags & AV_PKT_FLAG_KEY);
1862 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser may have allocated new data; wrap it so the packet owns it. */
1867 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1872 opkt.data = pkt->data;
1873 opkt.size = pkt->size;
1875 av_copy_packet_side_data(&opkt, pkt);
1877 #if FF_API_LAVF_FMT_RAWPICTURE
1878 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1879 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1880 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1881 /* store AVPicture in AVPacket, as expected by the output format */
1882 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1884 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1888 opkt.data = (uint8_t *)&pict;
1889 opkt.size = sizeof(AVPicture);
1890 opkt.flags |= AV_PKT_FLAG_KEY;
1894 write_frame(of->ctx, &opkt, ost);
/* If the decoder reports no channel layout, guess a default one from the
 * channel count (bounded by -guess_layout_max) and warn about the guess.
 * NOTE(review): the `return 0;` failure paths and final `return 1;` were
 * dropped in this extract. */
1897 int guess_input_channel_layout(InputStream *ist)
1899 AVCodecContext *dec = ist->dec_ctx;
1901 if (!dec->channel_layout) {
1902 char layout_name[256];
1904 if (dec->channels > ist->guess_layout_max)
1906 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1907 if (!dec->channel_layout)
1909 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1910 dec->channels, dec->channel_layout);
1911 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1912 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure counts and, with -xerror, abort on a decode
 * error or on a corrupt decoded frame.  'ist' may be NULL (subtitles).
 * NOTE(review): the exit calls and closing braces were dropped in this
 * extract. */
1917 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
1919 if (*got_output || ret<0)
1920 decode_error_stat[ret<0] ++;
1922 if (ret < 0 && exit_on_error)
1925 if (exit_on_error && *got_output && ist) {
1926 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1927 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, fix up its timestamp from the best available
 * source (decoder pts > pkt_pts > packet pts > stream dts), reconfigure the
 * filtergraphs when the sample format/rate/channels change mid-stream, and
 * push the decoded frame into every attached filter input.
 * Returns the decode result, or a negative error from the filter push.
 * NOTE(review): several lines (sample_rate divisors of the next_pts
 * increments, error gotos, closing braces) were dropped in this extract. */
1933 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1935 AVFrame *decoded_frame, *f;
1936 AVCodecContext *avctx = ist->dec_ctx;
1937 int i, ret, err = 0, resample_changed;
1938 AVRational decoded_frame_tb;
/* Lazily allocated, reused across calls; freed when the stream closes. */
1940 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1941 return AVERROR(ENOMEM);
1942 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1943 return AVERROR(ENOMEM);
1944 decoded_frame = ist->decoded_frame;
1946 update_benchmark(NULL);
1947 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1948 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1950 if (ret >= 0 && avctx->sample_rate <= 0) {
1951 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1952 ret = AVERROR_INVALIDDATA;
1955 check_decode_result(ist, got_output, ret);
1957 if (!*got_output || ret < 0)
1960 ist->samples_decoded += decoded_frame->nb_samples;
1961 ist->frames_decoded++;
1964 /* increment next_dts to use for the case where the input stream does not
1965 have timestamps or there are multiple frames in the packet */
1966 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1968 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Any change in format/rate/channels invalidates the filtergraph config. */
1972 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1973 ist->resample_channels != avctx->channels ||
1974 ist->resample_channel_layout != decoded_frame->channel_layout ||
1975 ist->resample_sample_rate != decoded_frame->sample_rate;
1976 if (resample_changed) {
1977 char layout1[64], layout2[64];
1979 if (!guess_input_channel_layout(ist)) {
1980 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1981 "layout for Input Stream #%d.%d\n", ist->file_index,
1985 decoded_frame->channel_layout = avctx->channel_layout;
1987 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1988 ist->resample_channel_layout);
1989 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1990 decoded_frame->channel_layout);
1992 av_log(NULL, AV_LOG_INFO,
1993 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1994 ist->file_index, ist->st->index,
1995 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1996 ist->resample_channels, layout1,
1997 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1998 avctx->channels, layout2);
2000 ist->resample_sample_fmt = decoded_frame->format;
2001 ist->resample_sample_rate = decoded_frame->sample_rate;
2002 ist->resample_channel_layout = decoded_frame->channel_layout;
2003 ist->resample_channels = avctx->channels;
2005 for (i = 0; i < nb_filtergraphs; i++)
2006 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2007 FilterGraph *fg = filtergraphs[i];
2008 if (configure_filtergraph(fg) < 0) {
2009 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2015 /* if the decoder provides a pts, use it instead of the last packet pts.
2016 the decoder could be delaying output by a packet or more. */
2017 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2018 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2019 decoded_frame_tb = avctx->time_base;
2020 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2021 decoded_frame->pts = decoded_frame->pkt_pts;
2022 decoded_frame_tb = ist->st->time_base;
2023 } else if (pkt->pts != AV_NOPTS_VALUE) {
2024 decoded_frame->pts = pkt->pts;
2025 decoded_frame_tb = ist->st->time_base;
2027 decoded_frame->pts = ist->dts;
2028 decoded_frame_tb = AV_TIME_BASE_Q;
/* Consume the packet pts so a multi-frame packet doesn't reuse it. */
2030 pkt->pts = AV_NOPTS_VALUE;
2031 if (decoded_frame->pts != AV_NOPTS_VALUE)
2032 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2033 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2034 (AVRational){1, avctx->sample_rate});
2035 ist->nb_samples = decoded_frame->nb_samples;
/* All filters but the last get a ref; the last consumes the frame. */
2036 for (i = 0; i < ist->nb_filters; i++) {
2037 if (i < ist->nb_filters - 1) {
2038 f = ist->filter_frame;
2039 err = av_frame_ref(f, decoded_frame);
2044 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2045 AV_BUFFERSRC_FLAG_PUSH);
2046 if (err == AVERROR_EOF)
2047 err = 0; /* ignore */
2051 decoded_frame->pts = AV_NOPTS_VALUE;
2053 av_frame_unref(ist->filter_frame);
2054 av_frame_unref(decoded_frame);
2055 return err < 0 ? err : ret;
/* Decode one video packet, derive the presentation timestamp from the
 * best-effort timestamp, handle hwaccel frame download, reconfigure
 * filtergraphs on resolution/pixel-format changes, and push the frame into
 * every attached filter input.
 * Returns the filter-push error if negative, otherwise the decode result.
 * NOTE(review): error gotos, `break`s and closing braces were dropped in
 * this extract. */
2058 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2060 AVFrame *decoded_frame, *f;
2061 int i, ret = 0, err = 0, resample_changed;
2062 int64_t best_effort_timestamp;
2063 AVRational *frame_sample_aspect;
2065 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2066 return AVERROR(ENOMEM);
2067 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 decoded_frame = ist->decoded_frame;
/* Feed our tracked dts back to the decoder in the stream timebase. */
2070 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2072 update_benchmark(NULL);
2073 ret = avcodec_decode_video2(ist->dec_ctx,
2074 decoded_frame, got_output, pkt);
2075 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2077 // The following line may be required in some cases where there is no parser
2078 // or the parser does not has_b_frames correctly
2079 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2080 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2081 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2083 av_log(ist->dec_ctx, AV_LOG_WARNING,
2084 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2085 "If you want to help, upload a sample "
2086 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2087 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2088 ist->dec_ctx->has_b_frames,
2089 ist->st->codec->has_b_frames);
2092 check_decode_result(ist, got_output, ret);
2094 if (*got_output && ret >= 0) {
2095 if (ist->dec_ctx->width != decoded_frame->width ||
2096 ist->dec_ctx->height != decoded_frame->height ||
2097 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2098 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2099 decoded_frame->width,
2100 decoded_frame->height,
2101 decoded_frame->format,
2102 ist->dec_ctx->width,
2103 ist->dec_ctx->height,
2104 ist->dec_ctx->pix_fmt);
2108 if (!*got_output || ret < 0)
2111 if(ist->top_field_first>=0)
2112 decoded_frame->top_field_first = ist->top_field_first;
2114 ist->frames_decoded++;
/* Download the frame from hardware memory when a hwaccel produced it. */
2116 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2117 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2121 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2123 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2124 if(best_effort_timestamp != AV_NOPTS_VALUE)
2125 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2128 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2129 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2130 ist->st->index, av_ts2str(decoded_frame->pts),
2131 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2132 best_effort_timestamp,
2133 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2134 decoded_frame->key_frame, decoded_frame->pict_type,
2135 ist->st->time_base.num, ist->st->time_base.den);
2140 if (ist->st->sample_aspect_ratio.num)
2141 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* A size or pixel-format change requires rebuilding the filtergraphs. */
2143 resample_changed = ist->resample_width != decoded_frame->width ||
2144 ist->resample_height != decoded_frame->height ||
2145 ist->resample_pix_fmt != decoded_frame->format;
2146 if (resample_changed) {
2147 av_log(NULL, AV_LOG_INFO,
2148 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2149 ist->file_index, ist->st->index,
2150 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2151 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2153 ist->resample_width = decoded_frame->width;
2154 ist->resample_height = decoded_frame->height;
2155 ist->resample_pix_fmt = decoded_frame->format;
2157 for (i = 0; i < nb_filtergraphs; i++) {
2158 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2159 configure_filtergraph(filtergraphs[i]) < 0) {
2160 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2166 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2167 for (i = 0; i < ist->nb_filters; i++) {
2168 if (!frame_sample_aspect->num)
2169 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* All filters but the last get a ref; the last consumes the frame. */
2171 if (i < ist->nb_filters - 1) {
2172 f = ist->filter_frame;
2173 err = av_frame_ref(f, decoded_frame);
2178 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2179 if (ret == AVERROR_EOF) {
2180 ret = 0; /* ignore */
2181 } else if (ret < 0) {
2182 av_log(NULL, AV_LOG_FATAL,
2183 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2189 av_frame_unref(ist->filter_frame);
2190 av_frame_unref(decoded_frame);
2191 return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every subtitle encoder
 * fed by this input stream, plus the sub2video machinery.  With
 * -fix_sub_duration, output is delayed by one subtitle so the previous
 * one's display time can be clipped to the gap between them.
 * NOTE(review): returns, braces and the `end` declaration were dropped in
 * this extract. */
2194 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2196 AVSubtitle subtitle;
2197 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2198 &subtitle, got_output, pkt);
/* ist deliberately NULL: no decoded_frame to check for corruption. */
2200 check_decode_result(NULL, got_output, ret);
2202 if (ret < 0 || !*got_output) {
2204 sub2video_flush(ist);
2208 if (ist->fix_sub_duration) {
2210 if (ist->prev_sub.got_output) {
2211 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2212 1000, AV_TIME_BASE);
2213 if (end < ist->prev_sub.subtitle.end_display_time) {
2214 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2215 "Subtitle duration reduced from %d to %d%s\n",
2216 ist->prev_sub.subtitle.end_display_time, end,
2217 end <= 0 ? ", dropping it" : "");
2218 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: the previous subtitle is emitted now. */
2221 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2222 FFSWAP(int, ret, ist->prev_sub.ret);
2223 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2231 sub2video_update(ist, &subtitle);
2233 if (!subtitle.num_rects)
2236 ist->frames_decoded++;
2238 for (i = 0; i < nb_output_streams; i++) {
2239 OutputStream *ost = output_streams[i];
2241 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2242 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2245 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2249 avsubtitle_free(&subtitle);
/* Signal EOF (NULL frame) to every filter input fed by this input stream.
 * NOTE(review): the declarations, error check and final return were
 * dropped in this extract. */
2253 static int send_filter_eof(InputStream *ist)
2256 for (i = 0; i < ist->nb_filters; i++) {
2257 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2264 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2265 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2271 if (!ist->saw_first_ts) {
2272 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2274 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2275 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2276 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2278 ist->saw_first_ts = 1;
2281 if (ist->next_dts == AV_NOPTS_VALUE)
2282 ist->next_dts = ist->dts;
2283 if (ist->next_pts == AV_NOPTS_VALUE)
2284 ist->next_pts = ist->pts;
2288 av_init_packet(&avpkt);
2296 if (pkt->dts != AV_NOPTS_VALUE) {
2297 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2298 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2299 ist->next_pts = ist->pts = ist->dts;
2302 // while we have more to decode or while the decoder did output something on EOF
2303 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2307 ist->pts = ist->next_pts;
2308 ist->dts = ist->next_dts;
2310 if (avpkt.size && avpkt.size != pkt->size &&
2311 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2312 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2313 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2314 ist->showed_multi_packet_warning = 1;
2317 switch (ist->dec_ctx->codec_type) {
2318 case AVMEDIA_TYPE_AUDIO:
2319 ret = decode_audio (ist, &avpkt, &got_output);
2321 case AVMEDIA_TYPE_VIDEO:
2322 ret = decode_video (ist, &avpkt, &got_output);
2323 if (avpkt.duration) {
2324 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2325 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2326 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2327 duration = ((int64_t)AV_TIME_BASE *
2328 ist->dec_ctx->framerate.den * ticks) /
2329 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2333 if(ist->dts != AV_NOPTS_VALUE && duration) {
2334 ist->next_dts += duration;
2336 ist->next_dts = AV_NOPTS_VALUE;
2339 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2341 case AVMEDIA_TYPE_SUBTITLE:
2342 ret = transcode_subtitles(ist, &avpkt, &got_output);
2349 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2350 ist->file_index, ist->st->index, av_err2str(ret));
2357 avpkt.pts= AV_NOPTS_VALUE;
2359 // touch data and size only if not EOF
2361 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2369 if (got_output && !pkt)
2373 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2374 /* except when looping we need to flush but not to send an EOF */
2375 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2376 int ret = send_filter_eof(ist);
2378 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2383 /* handle stream copy */
2384 if (!ist->decoding_needed) {
2385 ist->dts = ist->next_dts;
2386 switch (ist->dec_ctx->codec_type) {
2387 case AVMEDIA_TYPE_AUDIO:
2388 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2389 ist->dec_ctx->sample_rate;
2391 case AVMEDIA_TYPE_VIDEO:
2392 if (ist->framerate.num) {
2393 // TODO: Remove work-around for c99-to-c89 issue 7
2394 AVRational time_base_q = AV_TIME_BASE_Q;
2395 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2396 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2397 } else if (pkt->duration) {
2398 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2399 } else if(ist->dec_ctx->framerate.num != 0) {
2400 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2401 ist->next_dts += ((int64_t)AV_TIME_BASE *
2402 ist->dec_ctx->framerate.den * ticks) /
2403 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2407 ist->pts = ist->dts;
2408 ist->next_pts = ist->next_dts;
2410 for (i = 0; pkt && i < nb_output_streams; i++) {
2411 OutputStream *ost = output_streams[i];
2413 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2416 do_streamcopy(ist, ost, pkt);
2422 static void print_sdp(void)
2427 AVIOContext *sdp_pb;
2428 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2432 for (i = 0, j = 0; i < nb_output_files; i++) {
2433 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2434 avc[j] = output_files[i]->ctx;
2442 av_sdp_create(avc, j, sdp, sizeof(sdp));
2444 if (!sdp_filename) {
2445 printf("SDP:\n%s\n", sdp);
2448 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2449 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2451 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2452 avio_closep(&sdp_pb);
2453 av_freep(&sdp_filename);
2461 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2464 for (i = 0; hwaccels[i].name; i++)
2465 if (hwaccels[i].pix_fmt == pix_fmt)
2466 return &hwaccels[i];
2470 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2472 InputStream *ist = s->opaque;
2473 const enum AVPixelFormat *p;
2476 for (p = pix_fmts; *p != -1; p++) {
2477 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2478 const HWAccel *hwaccel;
2480 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2483 hwaccel = get_hwaccel(*p);
2485 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2486 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2489 ret = hwaccel->init(s);
2491 if (ist->hwaccel_id == hwaccel->id) {
2492 av_log(NULL, AV_LOG_FATAL,
2493 "%s hwaccel requested for input stream #%d:%d, "
2494 "but cannot be initialized.\n", hwaccel->name,
2495 ist->file_index, ist->st->index);
2496 return AV_PIX_FMT_NONE;
2500 ist->active_hwaccel_id = hwaccel->id;
2501 ist->hwaccel_pix_fmt = *p;
2508 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2510 InputStream *ist = s->opaque;
2512 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2513 return ist->hwaccel_get_buffer(s, frame, flags);
2515 return avcodec_default_get_buffer2(s, frame, flags);
2518 static int init_input_stream(int ist_index, char *error, int error_len)
2521 InputStream *ist = input_streams[ist_index];
2523 if (ist->decoding_needed) {
2524 AVCodec *codec = ist->dec;
2526 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2527 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2528 return AVERROR(EINVAL);
2531 ist->dec_ctx->opaque = ist;
2532 ist->dec_ctx->get_format = get_format;
2533 ist->dec_ctx->get_buffer2 = get_buffer;
2534 ist->dec_ctx->thread_safe_callbacks = 1;
2536 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2537 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2538 (ist->decoding_needed & DECODING_FOR_OST)) {
2539 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2540 if (ist->decoding_needed & DECODING_FOR_FILTER)
2541 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2544 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2545 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2546 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2547 if (ret == AVERROR_EXPERIMENTAL)
2548 abort_codec_experimental(codec, 0);
2550 snprintf(error, error_len,
2551 "Error while opening decoder for input stream "
2553 ist->file_index, ist->st->index, av_err2str(ret));
2556 assert_avoptions(ist->decoder_opts);
2559 ist->next_pts = AV_NOPTS_VALUE;
2560 ist->next_dts = AV_NOPTS_VALUE;
2565 static InputStream *get_input_stream(OutputStream *ost)
2567 if (ost->source_index >= 0)
2568 return input_streams[ost->source_index];
/* qsort comparator for int64_t values: negative/zero/positive for
 * a < b / a == b / a > b, without risking int overflow on subtraction. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
/* Open the encoder for one output stream (when transcoding) and propagate
 * the resulting parameters and coded side data to the muxer-facing AVStream.
 * Writes a message into 'error' and returns a negative AVERROR on failure.
 * NOTE(review): this region appears to have lines elided by extraction —
 * braces and several error-path lines below do not balance; compare against
 * the upstream file before relying on control flow here. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
if (ost->encoding_needed) {
AVCodec      *codec = ost->enc;
AVCodecContext *dec = NULL;
if ((ist = get_input_stream(ost)))
if (dec && dec->subtitle_header) {
/* ASS code assumes this buffer is null terminated so add extra byte. */
ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
if (!ost->enc_ctx->subtitle_header)
return AVERROR(ENOMEM);
memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* default to automatic threading unless the user chose a thread count */
if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* audio default bitrate of 128k when neither -b nor -ab was given */
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
!av_dict_get(ost->encoder_opts, "ab", NULL, 0))
av_dict_set(&ost->encoder_opts, "b", "128000", 0);
if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
if (ret == AVERROR_EXPERIMENTAL)
abort_codec_experimental(codec, 1);
snprintf(error, error_len,
"Error while opening encoder for output stream #%d:%d - "
"maybe incorrect parameters such as bit_rate, rate, width or height",
ost->file_index, ost->index);
/* fixed-frame-size audio encoders dictate the buffersink frame size */
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(ost->filter->filter,
ost->enc_ctx->frame_size);
assert_avoptions(ost->encoder_opts);
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
" It takes bits/s as argument, not kbits/s\n");
/* mirror the opened encoder context into the (deprecated) AVStream codec */
ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
av_log(NULL, AV_LOG_FATAL,
"Error initializing the output stream codec context.\n");
/* copy any side data the encoder produced onto the stream */
if (ost->enc_ctx->nb_coded_side_data) {
ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
sizeof(*ost->st->side_data));
if (!ost->st->side_data)
return AVERROR(ENOMEM);
for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
AVPacketSideData *sd_dst = &ost->st->side_data[i];
sd_dst->data = av_malloc(sd_src->size);
return AVERROR(ENOMEM);
memcpy(sd_dst->data, sd_src->data, sd_src->size);
sd_dst->size = sd_src->size;
sd_dst->type = sd_src->type;
ost->st->nb_side_data++;
// copy timebase while removing common factors
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
ost->st->codec->codec= ost->enc_ctx->codec;
/* stream-copy path: only apply user options and the input timebase */
ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
av_log(NULL, AV_LOG_FATAL,
"Error setting up codec context options.\n");
// copy timebase while removing common factors
ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2668 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2669 AVCodecContext *avctx)
2672 int n = 1, i, size, index = 0;
2675 for (p = kf; *p; p++)
2679 pts = av_malloc_array(size, sizeof(*pts));
2681 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2686 for (i = 0; i < n; i++) {
2687 char *next = strchr(p, ',');
2692 if (!memcmp(p, "chapters", 8)) {
2694 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2697 if (avf->nb_chapters > INT_MAX - size ||
2698 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2700 av_log(NULL, AV_LOG_FATAL,
2701 "Could not allocate forced key frames array.\n");
2704 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2705 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2707 for (j = 0; j < avf->nb_chapters; j++) {
2708 AVChapter *c = avf->chapters[j];
2709 av_assert1(index < size);
2710 pts[index++] = av_rescale_q(c->start, c->time_base,
2711 avctx->time_base) + t;
2716 t = parse_time_or_die("force_key_frames", p, 1);
2717 av_assert1(index < size);
2718 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2725 av_assert0(index == size);
2726 qsort(pts, size, sizeof(*pts), compare_int64);
2727 ost->forced_kf_count = size;
2728 ost->forced_kf_pts = pts;
2731 static void report_new_stream(int input_index, AVPacket *pkt)
2733 InputFile *file = input_files[input_index];
2734 AVStream *st = file->ctx->streams[pkt->stream_index];
2736 if (pkt->stream_index < file->nb_streams_warn)
2738 av_log(file->ctx, AV_LOG_WARNING,
2739 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2740 av_get_media_type_string(st->codec->codec_type),
2741 input_index, pkt->stream_index,
2742 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2743 file->nb_streams_warn = pkt->stream_index + 1;
2746 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2748 AVDictionaryEntry *e;
2750 uint8_t *encoder_string;
2751 int encoder_string_len;
2752 int format_flags = 0;
2753 int codec_flags = 0;
2755 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2758 e = av_dict_get(of->opts, "fflags", NULL, 0);
2760 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2763 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2765 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2767 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2770 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2773 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2774 encoder_string = av_mallocz(encoder_string_len);
2775 if (!encoder_string)
2778 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2779 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2781 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2782 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2783 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2784 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* Global transcode setup: attach complex-filtergraph outputs to source
 * streams, derive per-output encoding parameters (stream copy vs. encode),
 * open all encoders and decoders, write every output file header and dump
 * the stream mapping.  Returns 0 on success or a negative AVERROR.
 * NOTE(review): this region appears to have lines elided by extraction —
 * braces and several declarations (e.g. 'ost', 'ist') visible below do not
 * balance; compare against the upstream file before relying on control
 * flow here. */
static int transcode_init(void)
int ret = 0, i, j, k;
AVFormatContext *oc;
char error[1024] = {0};
/* bind unmapped complex-filtergraph outputs to the graph's single input
 * stream (searching backwards so the last match wins) */
for (i = 0; i < nb_filtergraphs; i++) {
FilterGraph *fg = filtergraphs[i];
for (j = 0; j < fg->nb_outputs; j++) {
OutputFilter *ofilter = fg->outputs[j];
if (!ofilter->ost || ofilter->ost->source_index >= 0)
if (fg->nb_inputs != 1)
for (k = nb_input_streams-1; k >= 0 ; k--)
if (fg->inputs[0]->ist == input_streams[k])
ofilter->ost->source_index = k;
/* init framerate emulation */
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = input_files[i];
if (ifile->rate_emu)
for (j = 0; j < ifile->nb_streams; j++)
input_streams[j + ifile->ist_index]->start = av_gettime_relative();
/* for each output stream, we compute the right encoding parameters */
for (i = 0; i < nb_output_streams; i++) {
AVCodecContext *enc_ctx;
AVCodecContext *dec_ctx = NULL;
ost = output_streams[i];
oc  = output_files[ost->file_index]->ctx;
ist = get_input_stream(ost);
if (ost->attachment_filename)
enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
dec_ctx = ist->dec_ctx;
ost->st->disposition          = ist->st->disposition;
enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* mark the first stream of each media type as default when the muxer
 * has no other stream of that type */
for (j=0; j<oc->nb_streams; j++) {
AVStream *st = oc->streams[j];
if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
if (j == oc->nb_streams)
if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
ost->st->disposition = AV_DISPOSITION_DEFAULT;
if (ost->stream_copy) {
uint64_t extra_size;
av_assert0(ist && !ost->filter);
extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
if (extra_size > INT_MAX) {
return AVERROR(EINVAL);
/* if stream_copy is selected, no need to decode or encode */
enc_ctx->codec_id   = dec_ctx->codec_id;
enc_ctx->codec_type = dec_ctx->codec_type;
if (!enc_ctx->codec_tag) {
unsigned int codec_tag;
if (!oc->oformat->codec_tag ||
av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
!av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
enc_ctx->codec_tag = dec_ctx->codec_tag;
enc_ctx->bit_rate       = dec_ctx->bit_rate;
enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
enc_ctx->field_order    = dec_ctx->field_order;
if (dec_ctx->extradata_size) {
enc_ctx->extradata      = av_mallocz(extra_size);
if (!enc_ctx->extradata) {
return AVERROR(ENOMEM);
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
enc_ctx->extradata_size= dec_ctx->extradata_size;
enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
enc_ctx->time_base = ist->st->time_base;
/*
 * Avi is a special case here because it supports variable fps but
 * having the fps and timebase differ significantly adds quite some
 * overhead
 */
if(!strcmp(oc->oformat->name, "avi")) {
if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
&& 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
enc_ctx->time_base.num = ist->st->r_frame_rate.den;
enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
enc_ctx->ticks_per_frame = 2;
} else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
enc_ctx->time_base.den *= 2;
enc_ctx->ticks_per_frame = 2;
} else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
&& strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
&& strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
&& strcmp(oc->oformat->name, "f4v")
if( copy_tb<0 && dec_ctx->time_base.den
&& av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
&& av_q2d(ist->st->time_base) < 1.0/500
enc_ctx->time_base = dec_ctx->time_base;
enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* timecode tracks keep the decoder timebase when it is a sane
 * (sub-121fps) frame duration */
if (   enc_ctx->codec_tag == AV_RL32("tmcd")
&& dec_ctx->time_base.num < dec_ctx->time_base.den
&& dec_ctx->time_base.num > 0
&& 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
enc_ctx->time_base = dec_ctx->time_base;
if (!ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if(ost->frame_rate.num)
enc_ctx->time_base = av_inv_q(ost->frame_rate);
av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* copy stream side data (except an overridden display matrix) */
if (ist->st->nb_side_data) {
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
sizeof(*ist->st->side_data));
if (!ost->st->side_data)
return AVERROR(ENOMEM);
ost->st->nb_side_data = 0;
for (j = 0; j < ist->st->nb_side_data; j++) {
const AVPacketSideData *sd_src = &ist->st->side_data[j];
AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
sd_dst->data = av_malloc(sd_src->size);
return AVERROR(ENOMEM);
memcpy(sd_dst->data, sd_src->data, sd_src->size);
sd_dst->size = sd_src->size;
sd_dst->type = sd_src->type;
ost->st->nb_side_data++;
ost->parser = av_parser_init(enc_ctx->codec_id);
switch (enc_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (audio_volume != 256) {
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
enc_ctx->channel_layout     = dec_ctx->channel_layout;
enc_ctx->sample_rate        = dec_ctx->sample_rate;
enc_ctx->channels           = dec_ctx->channels;
enc_ctx->frame_size         = dec_ctx->frame_size;
enc_ctx->audio_service_type = dec_ctx->audio_service_type;
enc_ctx->block_align        = dec_ctx->block_align;
enc_ctx->initial_padding    = dec_ctx->delay;
enc_ctx->profile            = dec_ctx->profile;
#if FF_API_AUDIOENC_DELAY
enc_ctx->delay              = dec_ctx->delay;
/* these block_align values are unreliable for MP3/AC3 — let the
 * muxer recompute them */
if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
enc_ctx->block_align= 0;
if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
enc_ctx->block_align= 0;
case AVMEDIA_TYPE_VIDEO:
enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
enc_ctx->width              = dec_ctx->width;
enc_ctx->height             = dec_ctx->height;
enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
av_mul_q(ost->frame_aspect_ratio,
(AVRational){ enc_ctx->height, enc_ctx->width });
av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
"with stream copy may produce invalid files\n");
else if (ist->st->sample_aspect_ratio.num)
sar = ist->st->sample_aspect_ratio;
sar = dec_ctx->sample_aspect_ratio;
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
ost->st->r_frame_rate = ist->st->r_frame_rate;
case AVMEDIA_TYPE_SUBTITLE:
enc_ctx->width  = dec_ctx->width;
enc_ctx->height = dec_ctx->height;
case AVMEDIA_TYPE_UNKNOWN:
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
/* encoding path: find the encoder and set up a simple filtergraph
 * when the stream is not fed by a complex graph */
ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
/* should only happen when a default codec is not present. */
snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
ret = AVERROR(EINVAL);
set_encoder_id(output_files[ost->file_index], ost);
if (qsv_transcode_init(ost))
(enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
fg = init_simple_filtergraph(ist, ost);
if (configure_filtergraph(fg)) {
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* pick the output frame rate: filter graph, then -r/input
 * framerate, then r_frame_rate, then a 25fps fallback */
if (!ost->frame_rate.num)
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->st->r_frame_rate;
if (ist && !ost->frame_rate.num) {
ost->frame_rate = (AVRational){25, 1};
av_log(NULL, AV_LOG_WARNING,
"about the input framerate is available. Falling "
"back to a default value of 25fps for output stream #%d:%d. Use the -r option "
"if you want a different framerate.\n",
ost->file_index, ost->index);
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
ost->frame_rate = ost->enc->supported_framerates[idx];
// reduce frame rate for mpeg4 to be within the spec limits
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
ost->frame_rate.num, ost->frame_rate.den, 65535);
switch (enc_ctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
case AVMEDIA_TYPE_VIDEO:
enc_ctx->time_base = av_inv_q(ost->frame_rate);
if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
for (j = 0; j < ost->forced_kf_count; j++)
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
enc_ctx->time_base);
enc_ctx->width  = ost->filter->filter->inputs[0]->w;
enc_ctx->height = ost->filter->filter->inputs[0]->h;
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
ost->filter->filter->inputs[0]->sample_aspect_ratio;
if (!strncmp(ost->enc->name, "libx264", 7) &&
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
av_log(NULL, AV_LOG_WARNING,
"No pixel format specified, %s for H.264 encoding chosen.\n"
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
av_log(NULL, AV_LOG_WARNING,
"No pixel format specified, %s for MPEG-2 encoding chosen.\n"
"Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
ost->st->avg_frame_rate = ost->frame_rate;
enc_ctx->width   != dec_ctx->width  ||
enc_ctx->height  != dec_ctx->height ||
enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
if (ost->forced_keyframes) {
if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
av_log(NULL, AV_LOG_ERROR,
"Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
ost->forced_keyframes_expr_const_values[FKF_N] = 0;
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
// parse it only for static kf timings
} else if(strncmp(ost->forced_keyframes, "source", 6)) {
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
case AVMEDIA_TYPE_SUBTITLE:
enc_ctx->time_base = (AVRational){1, 1000};
if (!enc_ctx->width) {
enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
case AVMEDIA_TYPE_DATA:
/* evaluate the -disposition flag string for this stream */
if (ost->disposition) {
static const AVOption opts[] = {
{ "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
{ "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
{ "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
{ "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
{ "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
{ "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
{ "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
{ "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
{ "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
{ "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
{ "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
{ "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
{ "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
{ "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
static const AVClass class = {
.item_name  = av_default_item_name,
.version    = LIBAVUTIL_VERSION_INT,
const AVClass *pclass = &class;
ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
/* open each encoder */
for (i = 0; i < nb_output_streams; i++) {
ret = init_output_stream(output_streams[i], error, sizeof(error));
/* init input streams */
for (i = 0; i < nb_input_streams; i++)
if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
avcodec_close(ost->enc_ctx);
/* discard unused programs */
for (i = 0; i < nb_input_files; i++) {
InputFile *ifile = input_files[i];
for (j = 0; j < ifile->ctx->nb_programs; j++) {
AVProgram *p = ifile->ctx->programs[j];
int discard  = AVDISCARD_ALL;
for (k = 0; k < p->nb_stream_indexes; k++)
if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
discard = AVDISCARD_DEFAULT;
p->discard = discard;
/* open files and write file headers */
for (i = 0; i < nb_output_files; i++) {
oc = output_files[i]->ctx;
oc->interrupt_callback = int_cb;
if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
snprintf(error, sizeof(error),
"Could not write header for output file #%d "
"(incorrect codec parameters ?): %s",
i, av_err2str(ret));
ret = AVERROR(EINVAL);
//         assert_avoptions(output_files[i]->opts);
if (strcmp(oc->oformat->name, "rtp")) {
/* dump the file output parameters - cannot be done before in case
   of stream copy */
for (i = 0; i < nb_output_files; i++) {
av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
/* dump the stream mapping */
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
for (j = 0; j < ist->nb_filters; j++) {
if (ist->filters[j]->graph->graph_desc) {
av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
ist->filters[j]->name);
if (nb_filtergraphs > 1)
av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
av_log(NULL, AV_LOG_INFO, "\n");
for (i = 0; i < nb_output_streams; i++) {
ost = output_streams[i];
if (ost->attachment_filename) {
/* an attached file */
av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
ost->attachment_filename, ost->file_index, ost->index);
if (ost->filter && ost->filter->graph->graph_desc) {
/* output from a complex graph */
av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
if (nb_filtergraphs > 1)
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
ost->index, ost->enc ? ost->enc->name : "?");
av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
input_streams[ost->source_index]->file_index,
input_streams[ost->source_index]->st->index,
if (ost->sync_ist != input_streams[ost->source_index])
av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
ost->sync_ist->file_index,
ost->sync_ist->st->index);
if (ost->stream_copy)
av_log(NULL, AV_LOG_INFO, " (copy)");
const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
const AVCodec *out_codec   = ost->enc;
const char *decoder_name   = "?";
const char *in_codec_name  = "?";
const char *encoder_name   = "?";
const char *out_codec_name = "?";
const AVCodecDescriptor *desc;
decoder_name  = in_codec->name;
desc = avcodec_descriptor_get(in_codec->id);
in_codec_name = desc->name;
if (!strcmp(decoder_name, in_codec_name))
decoder_name = "native";
encoder_name   = out_codec->name;
desc = avcodec_descriptor_get(out_codec->id);
out_codec_name = desc->name;
if (!strcmp(encoder_name, out_codec_name))
encoder_name = "native";
av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
in_codec_name, decoder_name,
out_codec_name, encoder_name);
av_log(NULL, AV_LOG_INFO, "\n");
av_log(NULL, AV_LOG_ERROR, "%s\n", error);
if (sdp_filename || want_sdp) {
transcode_init_done = 1;
3350 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3351 static int need_output(void)
/* Scans every output stream. Visible criteria for a stream being "done":
 * it is flagged finished, its output file already reached the -fs byte
 * limit (avio_tell vs. limit_filesize), or it hit its -frames cap. */
3355 for (i = 0; i < nb_output_streams; i++) {
3356 OutputStream *ost = output_streams[i];
3357 OutputFile *of = output_files[ost->file_index];
3358 AVFormatContext *os = output_files[ost->file_index]->ctx;
3360 if (ost->finished ||
3361 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3363 if (ost->frame_number >= ost->max_frames) {
/* Frame cap reached: close every stream belonging to this output file,
 * not just the one that hit the limit. */
3365 for (j = 0; j < of->ctx->nb_streams; j++)
3366 close_output_stream(output_streams[of->ost_index + j]);
3377  * Select the output stream to process.
3379  * @return selected output stream, or NULL if none available
3381 static OutputStream *choose_output(void)
/* Picks the non-finished output stream with the smallest current DTS
 * (rescaled to a common time base), which keeps muxing roughly
 * interleaved across streams. */
3384 int64_t opts_min = INT64_MAX;
3385 OutputStream *ost_min = NULL;
3387 for (i = 0; i < nb_output_streams; i++) {
3388 OutputStream *ost = output_streams[i];
/* A stream with no DTS yet sorts as INT64_MIN so it is served first. */
3389 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3390 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3392 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3393 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3395 if (!ost->finished && opts < opts_min) {
/* An "unavailable" stream wins the comparison but yields NULL, which
 * signals the caller that no stream is currently serviceable. */
3397 ost_min = ost->unavailable ? NULL : ost;
3403 static void set_tty_echo(int on)
/* Enable/disable terminal echo on stdin (fd 0) via termios; used around
 * interactive command entry. tcgetattr failure is silently ignored
 * (e.g. when stdin is not a tty). */
3407 if (tcgetattr(0, &tty) == 0) {
3408 if (on) tty.c_lflag |= ECHO;
3409 else tty.c_lflag &= ~ECHO;
3410 tcsetattr(0, TCSANOW, &tty);
3415 static int check_keyboard_interaction(int64_t cur_time)
/* Polls for a single keypress and dispatches interactive commands.
 * Returns AVERROR_EXIT to request shutdown (on signal or, presumably,
 * on 'q' — the key-read line is elided here; TODO confirm). */
3418 static int64_t last_time;
3419 if (received_nb_signals)
3420 return AVERROR_EXIT;
3421 /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100ms, and never poll in daemon mode. */
3422 if(cur_time - last_time >= 100000 && !run_as_daemon){
3424 last_time = cur_time;
3428 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity; 's' toggles the QP histogram. */
3429 if (key == '+') av_log_set_level(av_log_get_level()+10);
3430 if (key == '-') av_log_set_level(av_log_get_level()-10);
3431 if (key == 's') qp_hist ^= 1;
3434 do_hex_dump = do_pkt_dump = 0;
3435 } else if(do_pkt_dump){
3439 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter; 'C' sends/queues to
 * all matching filters. The command line is read interactively as
 * "<target>|all <time>|-1 <command>[ <argument>]". */
3441 if (key == 'c' || key == 'C'){
3442 char buf[4096], target[64], command[256], arg[256] = {0};
3445 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3448 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3453 fprintf(stderr, "\n");
/* sscanf must match at least target, time and command (arg optional). */
3455 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3456 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3457 target, time, command, arg);
3458 for (i = 0; i < nb_filtergraphs; i++) {
3459 FilterGraph *fg = filtergraphs[i];
3462 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3463 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3464 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3465 } else if (key == 'c') {
3466 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3467 ret = AVERROR_PATCHWELCOME;
/* time != -1 (presumably): queue the command for later execution. */
3469 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3471 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3476 av_log(NULL, AV_LOG_ERROR,
3477 "Parse error, at least 3 arguments were expected, "
3478 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through codec debug flag modes; 'D' (presumably) prompts
 * for an explicit numeric value — the branch split is elided here. */
3481 if (key == 'd' || key == 'D'){
3484 debug = input_streams[0]->st->codec->debug<<1;
3485 if(!debug) debug = 1;
3486 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3493 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3498 fprintf(stderr, "\n");
3499 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3500 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to all decoder and encoder contexts. */
3502 for(i=0;i<nb_input_streams;i++) {
3503 input_streams[i]->st->codec->debug = debug;
3505 for(i=0;i<nb_output_streams;i++) {
3506 OutputStream *ost = output_streams[i];
3507 ost->enc_ctx->debug = debug;
3509 if(debug) av_log_set_level(AV_LOG_DEBUG);
3510 fprintf(stderr,"debug=%d\n", debug);
/* '?' (presumably) prints this help table — the key test is elided. */
3513 fprintf(stderr, "key    function\n"
3514 "?      show this help\n"
3515 "+      increase verbosity\n"
3516 "-      decrease verbosity\n"
3517 "c      Send command to first matching filter supporting it\n"
3518 "C      Send/Que command to all matching filters\n"
3519 "D      cycle through available debug modes\n"
3520 "h      dump packets/hex press to cycle through the 3 states\n"
3522 "s      Show QP histogram\n"
3529 static void *input_thread(void *arg)
/* Per-input-file demuxer thread: reads packets with av_read_frame and
 * forwards them to the main thread through f->in_thread_queue.
 * arg is the InputFile* (presumably assigned to f in an elided line). */
3532 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3537 ret = av_read_frame(f->ctx, &pkt);
3539 if (ret == AVERROR(EAGAIN)) {
/* Terminal read error/EOF: propagate it to the queue's receiver side. */
3544 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3547 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send hit a full queue: retry (blocking, after the elided
 * flags reset — TODO confirm) and warn the user to raise -thread_queue_size. */
3548 if (flags && ret == AVERROR(EAGAIN)) {
3550 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3551 av_log(f->ctx, AV_LOG_WARNING,
3552 "Thread message queue blocking; consider raising the "
3553 "thread_queue_size option (current value: %d)\n",
3554 f->thread_queue_size);
/* Send failed for another reason: log it (unless plain EOF), drop the
 * packet we own, and signal the error to the receiver before exiting. */
3557 if (ret != AVERROR_EOF)
3558 av_log(f->ctx, AV_LOG_ERROR,
3559 "Unable to send packet to main thread: %s\n",
3561 av_packet_unref(&pkt);
3562 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3570 static void free_input_threads(void)
/* Shut down and reap all input demuxer threads: signal EOF to the sender
 * side, drain and unref any queued packets, join the thread, then free
 * the message queue. Safe to call when queues were never created. */
3574 for (i = 0; i < nb_input_files; i++) {
3575 InputFile *f = input_files[i];
3578 if (!f || !f->in_thread_queue)
3580 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so the sender unblocks and no AVPacket leaks. */
3581 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3582 av_packet_unref(&pkt);
3584 pthread_join(f->thread, NULL);
3586 av_thread_message_queue_free(&f->in_thread_queue);
3590 static int init_input_threads(void)
/* Create one demuxer thread + message queue per input file. With a single
 * input file, threading is skipped entirely (early return, body elided). */
3594 if (nb_input_files == 1)
3597 for (i = 0; i < nb_input_files; i++) {
3598 InputFile *f = input_files[i];
/* Non-seekable inputs (pipes, network) — except the lavfi pseudo-demuxer —
 * are read in non-blocking mode so one stalled input cannot block others. */
3600 if (f->ctx->pb ? !f->ctx->pb->seekable :
3601 strcmp(f->ctx->iformat->name, "lavfi"))
3602 f->non_blocking = 1;
3603 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3604 f->thread_queue_size, sizeof(AVPacket));
3608 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
/* pthread_create returns a positive errno value, hence AVERROR(ret). */
3609 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3610 av_thread_message_queue_free(&f->in_thread_queue);
3611 return AVERROR(ret);
3617 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
/* Multi-threaded variant: receive the next packet from the input thread's
 * queue; non-blocking when the file was marked non_blocking (the condition
 * feeding the ternary is elided here). */
3619 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3621 AV_THREAD_MESSAGE_NONBLOCK : 0);
3625 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Fetch the next packet for an input file. The visible loop implements
 * rate-limited reading (presumably for -re): compares each stream's DTS,
 * rescaled to microseconds, against wall-clock time elapsed since start,
 * returning EAGAIN when the input is ahead of real time — TODO confirm
 * against the elided guard condition. */
3629 for (i = 0; i < f->nb_streams; i++) {
3630 InputStream *ist = input_streams[f->ist_index + i];
3631 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3632 int64_t now = av_gettime_relative() - ist->start;
3634 return AVERROR(EAGAIN);
/* Multiple inputs use the threaded path; a single input reads directly. */
3639 if (nb_input_files > 1)
3640 return get_input_packet_mt(f, pkt);
3642 return av_read_frame(f->ctx, pkt);
3645 static int got_eagain(void)
/* Reports whether any output stream is currently marked unavailable
 * (i.e. a previous step hit EAGAIN somewhere). */
3648 for (i = 0; i < nb_output_streams; i++)
3649 if (output_streams[i]->unavailable)
3654 static void reset_eagain(void)
/* Clear the EAGAIN bookkeeping on every input file and output stream so
 * the next transcode iteration retries them all. */
3657 for (i = 0; i < nb_input_files; i++)
3658 input_files[i]->eagain = 0;
3659 for (i = 0; i < nb_output_streams; i++)
3660 output_streams[i]->unavailable = 0;
3663 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares the two durations with av_compare_ts (exact, overflow-safe
 * rational comparison) and keeps the larger; the winning value's time
 * base is returned so the caller can track which base *duration is in. */
3664 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3665                                AVRational time_base)
3671         return tmp_time_base;
3674     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3677         return tmp_time_base;
3683 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
/* Rewind an input file for -stream_loop: seek back to start_time, flush
 * all active decoders, and accumulate the file's total duration (used by
 * process_input to offset timestamps on the next pass). */
3686 AVCodecContext *avctx;
3687 int i, ret, has_audio = 0;
3688 int64_t duration = 0;
3690 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders (NULL packet + avcodec_flush_buffers) and
 * detect whether any audio stream produced samples. */
3694 for (i = 0; i < ifile->nb_streams; i++) {
3695 ist = input_streams[ifile->ist_index + i];
3696 avctx = ist->dec_ctx;
3699 if (ist->decoding_needed) {
3700 process_input_packet(ist, NULL, 1);
3701 avcodec_flush_buffers(avctx);
3704 /* duration is the length of the last frame in a stream
3705 * when audio stream is present we don't care about
3706 * last video frame length because it's not defined exactly */
3707 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's last-frame duration, preferring
 * audio (exact sample count) over video (frame-rate estimate). */
3711 for (i = 0; i < ifile->nb_streams; i++) {
3712 ist = input_streams[ifile->ist_index + i];
3713 avctx = ist->dec_ctx;
3716 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3717 AVRational sample_rate = {1, avctx->sample_rate};
3719 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video fallback chain: explicit -r framerate, then the stream's
 * average frame rate, else a nominal 1-tick duration. */
3723 if (ist->framerate.num) {
3724 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3725 } else if (ist->st->avg_frame_rate.num) {
3726 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3727 } else duration = 1;
3729 if (!ifile->duration)
3730 ifile->time_base = ist->st->time_base;
3731 /* the total duration of the stream, max_pts - min_pts is
3732 * the duration of the stream without the last frame */
3733 duration += ist->max_pts - ist->min_pts;
3734 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented per pass (decrement elided here). */
3738 if (ifile->loop > 0)
3746  * - 0 -- one packet was read and processed
3747  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3748 * this function should be called again
3749  * - AVERROR_EOF -- this function should not be called again
3751 static int process_input(int file_index)
/* Read one packet from the given input file, fix up its timestamps
 * (wrap correction, ts_offset, ts_scale, loop offset, discontinuity
 * handling), then hand it to process_input_packet(). */
3753 InputFile *ifile = input_files[file_index];
3754 AVFormatContext *is;
3762 ret = get_input_packet(ifile, &pkt);
3764 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure/EOF, seek back to the start and retry. */
3768 if (ret < 0 && ifile->loop) {
3769 if ((ret = seek_to_start(ifile, is)) < 0)
3771 ret = get_input_packet(ifile, &pkt);
3774 if (ret != AVERROR_EOF) {
3775 print_error(is->filename, ret);
/* EOF handling: flush every decoder of this file and finish outputs
 * that are fed directly (streamcopy / subtitles) rather than via lavfi. */
3780 for (i = 0; i < ifile->nb_streams; i++) {
3781 ist = input_streams[ifile->ist_index + i];
3782 if (ist->decoding_needed) {
3783 ret = process_input_packet(ist, NULL, 0);
3788 /* mark all outputs that don't go through lavfi as finished */
3789 for (j = 0; j < nb_output_streams; j++) {
3790 OutputStream *ost = output_streams[j];
3792 if (ost->source_index == ifile->ist_index + i &&
3793 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3794 finish_output_stream(ost);
/* EOF is reported to the caller as EAGAIN so other inputs keep going. */
3798 ifile->eof_reached = 1;
3799 return AVERROR(EAGAIN);
3805 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3806 is->streams[pkt.stream_index]);
3808 /* the following test is needed in case new streams appear
3809 dynamically in stream : we ignore them */
3810 if (pkt.stream_index >= ifile->nb_streams) {
3811 report_new_stream(file_index, &pkt);
3812 goto discard_packet;
3815 ist = input_streams[ifile->ist_index + pkt.stream_index];
3817 ist->data_size += pkt.size;
3821 goto discard_packet;
3823 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3824 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3829 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3830 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3831 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3832 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3833 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3834 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3835 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3836 av_ts2str(input_files[ist->file_index]->ts_offset),
3837 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
3840 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3841 int64_t stime, stime2;
3842 // Correcting starttime based on the enabled streams
3843 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3844 // so we instead do it here as part of discontinuity handling
3845 if ( ist->next_dts == AV_NOPTS_VALUE
3846 && ifile->ts_offset == -is->start_time
3847 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3848 int64_t new_start_time = INT64_MAX;
3849 for (i=0; i<is->nb_streams; i++) {
3850 AVStream *st = is->streams[i];
3851 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3853 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3855 if (new_start_time > is->start_time) {
3856 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3857 ifile->ts_offset = -new_start_time;
3861 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3862 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3863 ist->wrap_correction_done = 1;
/* A timestamp more than half the wrap range past start is assumed to
 * have wrapped; undo the wrap and retry correction on later packets. */
3865 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3866 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3867 ist->wrap_correction_done = 0;
3869 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3870 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3871 ist->wrap_correction_done = 0;
3875 /* add the stream-global side data to the first packet */
3876 if (ist->nb_packets == 1) {
3877 if (ist->st->nb_side_data)
3878 av_packet_split_side_data(&pkt);
3879 for (i = 0; i < ist->st->nb_side_data; i++) {
3880 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Skip side data the packet already carries, and skip the display
 * matrix when autorotate will handle rotation in the filter chain. */
3883 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3885 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3888 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3892 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the -itsscale factor. */
3896 if (pkt.dts != AV_NOPTS_VALUE)
3897 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3898 if (pkt.pts != AV_NOPTS_VALUE)
3899 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3901 if (pkt.pts != AV_NOPTS_VALUE)
3902 pkt.pts *= ist->ts_scale;
3903 if (pkt.dts != AV_NOPTS_VALUE)
3904 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first packet of a stream (next_dts
 * unset) compared against the file's last seen timestamp. */
3906 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3907 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3908 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3909 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3910 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3911 int64_t delta = pkt_dts - ifile->last_ts;
3912 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3913 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3914 ifile->ts_offset -= delta;
3915 av_log(NULL, AV_LOG_DEBUG,
3916 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3917 delta, ifile->ts_offset);
3918 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3919 if (pkt.pts != AV_NOPTS_VALUE)
3920 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max pts for the next loop's duration computation. */
3924 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3925 if (pkt.pts != AV_NOPTS_VALUE) {
3926 pkt.pts += duration;
3927 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3928 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3931 if (pkt.dts != AV_NOPTS_VALUE)
3932 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next_dts. */
3934 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3935 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3936 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3937 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3939 int64_t delta = pkt_dts - ist->next_dts;
3940 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3941 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3942 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3943 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3944 ifile->ts_offset -= delta;
3945 av_log(NULL, AV_LOG_DEBUG,
3946 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3947 delta, ifile->ts_offset);
3948 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3949 if (pkt.pts != AV_NOPTS_VALUE)
3950 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: instead of shifting, drop timestamps that exceed
 * the (larger) dts_error_threshold as invalid. */
3953 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3954 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3955 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3956 pkt.dts = AV_NOPTS_VALUE;
3958 if (pkt.pts != AV_NOPTS_VALUE){
3959 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3960 delta = pkt_pts - ist->next_dts;
3961 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3962 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3963 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3964 pkt.pts = AV_NOPTS_VALUE;
3970 if (pkt.dts != AV_NOPTS_VALUE)
3971 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3974 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3975 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3976 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3977 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3978 av_ts2str(input_files[ist->file_index]->ts_offset),
3979 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3982 sub2video_heartbeat(ist, pkt.pts);
3984 process_input_packet(ist, &pkt, 0);
3987 av_packet_unref(&pkt);
3993  * Perform a step of transcoding for the specified filter graph.
3995  * @param[in] graph filter graph to consider
3996  * @param[out] best_ist input stream where a frame would allow to continue
3997  * @return 0 for success, <0 for error
3999 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4002 int nb_requests, nb_requests_max = 0;
4003 InputFilter *ifilter;
/* Ask the graph to process its oldest pending frame; success means
 * output may be ready, so reap it. */
4007 ret = avfilter_graph_request_oldest(graph->graph);
4009 return reap_filters(0);
/* Graph fully drained: flush outputs and close every fed stream. */
4011 if (ret == AVERROR_EOF) {
4012 ret = reap_filters(1);
4013 for (i = 0; i < graph->nb_outputs; i++)
4014 close_output_stream(graph->outputs[i]->ost);
4017 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input whose buffersrc has the
 * most failed requests as the best stream to feed next, skipping inputs
 * whose file is stalled (eagain) or exhausted (eof_reached). */
4020 for (i = 0; i < graph->nb_inputs; i++) {
4021 ifilter = graph->inputs[i];
4023 if (input_files[ist->file_index]->eagain ||
4024 input_files[ist->file_index]->eof_reached)
4026 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4027 if (nb_requests > nb_requests_max) {
4028 nb_requests_max = nb_requests;
/* No feedable input found (presumably *best_ist stayed NULL): mark the
 * graph's outputs unavailable for this iteration. */
4034 for (i = 0; i < graph->nb_outputs; i++)
4035 graph->outputs[i]->ost->unavailable = 1;
4041  * Run a single step of transcoding.
4043  * @return 0 for success, <0 for error
4045 static int transcode_step(void)
/* One scheduler iteration: pick the neediest output stream, find the
 * input that feeds it (directly or via its filter graph), read and
 * process one packet from that input, then reap filtered frames. */
4051 ost = choose_output();
4058 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filtered output: let the graph pick which input stream to feed. */
4063 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Direct (non-filtered) output: its source input stream must exist. */
4068 av_assert0(ost->source_index >= 0);
4069 ist = input_streams[ost->source_index];
4072 ret = process_input(ist->file_index);
4073 if (ret == AVERROR(EAGAIN)) {
4074 if (input_files[ist->file_index]->eagain)
4075 ost->unavailable = 1;
/* EOF from the input is not an error at this level. */
4080 return ret == AVERROR_EOF ? 0 : ret;
4082 return reap_filters(0);
4086  * The following code is the main loop of the file converter
4088 static int transcode(void)
/* Top-level conversion driver: initialize, loop over transcode_step()
 * until no output is needed or a signal arrives, then flush decoders,
 * write trailers, close codecs, and clean up per-stream resources. */
4091 AVFormatContext *os;
4094 int64_t timer_start;
4095 int64_t total_packets_written = 0;
4097 ret = transcode_init();
4101 if (stdin_interaction) {
4102 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4105 timer_start = av_gettime_relative();
4108 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM/SIGINT or until all outputs are done. */
4112 while (!received_sigterm) {
4113 int64_t cur_time= av_gettime_relative();
4115 /* if 'q' pressed, exits */
4116 if (stdin_interaction)
4117 if (check_keyboard_interaction(cur_time) < 0)
4120 /* check if there's any stream where output is still needed */
4121 if (!need_output()) {
4122 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4126 ret = transcode_step();
4128 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4132 av_strerror(ret, errbuf, sizeof(errbuf));
4134 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4139 /* dump report by using the output first video and audio streams */
4140 print_report(0, timer_start, cur_time);
4143 free_input_threads();
4146 /* at the end of stream, we must flush the decoder buffers */
4147 for (i = 0; i < nb_input_streams; i++) {
4148 ist = input_streams[i];
4149 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4150 process_input_packet(ist, NULL, 0);
4157 /* write the trailer if needed and close file */
4158 for (i = 0; i < nb_output_files; i++) {
4159 os = output_files[i]->ctx;
4160 if ((ret = av_write_trailer(os)) < 0) {
4161 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4167 /* dump report by using the first video and audio streams */
4168 print_report(1, timer_start, av_gettime_relative());
4170 /* close each encoder */
4171 for (i = 0; i < nb_output_streams; i++) {
4172 ost = output_streams[i];
4173 if (ost->encoding_needed) {
4174 av_freep(&ost->enc_ctx->stats_in);
4176 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4179 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4180 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4184 /* close each decoder */
4185 for (i = 0; i < nb_input_streams; i++) {
4186 ist = input_streams[i];
4187 if (ist->decoding_needed) {
4188 avcodec_close(ist->dec_ctx);
4189 if (ist->hwaccel_uninit)
4190 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (presumably reached via goto on errors too): free
 * per-output-stream allocations and option dictionaries. */
4199 free_input_threads();
4202 if (output_streams) {
4203 for (i = 0; i < nb_output_streams; i++) {
4204 ost = output_streams[i];
4207 if (fclose(ost->logfile))
4208 av_log(NULL, AV_LOG_ERROR,
4209 "Error closing logfile, loss of information possible: %s\n",
4210 av_err2str(AVERROR(errno)));
4211 ost->logfile = NULL;
4213 av_freep(&ost->forced_kf_pts);
4214 av_freep(&ost->apad);
4215 av_freep(&ost->disposition);
4216 av_dict_free(&ost->encoder_opts);
4217 av_dict_free(&ost->sws_dict);
4218 av_dict_free(&ost->swr_opts);
4219 av_dict_free(&ost->resample_opts);
4227 static int64_t getutime(void)
/* Return consumed user CPU time in microseconds, picking the best
 * platform facility available (the #if for getrusage is elided above
 * the first branch). */
4230 struct rusage rusage;
4232 getrusage(RUSAGE_SELF, &rusage);
4233 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4234 #elif HAVE_GETPROCESSTIMES
4236 FILETIME c, e, k, u;
4237 proc = GetCurrentProcess();
4238 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
4239 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* Fallback: wall-clock time instead of CPU time. */
4241 return av_gettime_relative();
4245 static int64_t getmaxrss(void)
/* Return peak memory usage in bytes for the -benchmark report;
 * 0/fallback behavior for unsupported platforms is elided. */
4247 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4248 struct rusage rusage;
4249 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes. */
4250 return (int64_t)rusage.ru_maxrss * 1024;
4251 #elif HAVE_GETPROCESSMEMORYINFO
4253 PROCESS_MEMORY_COUNTERS memcounters;
4254 proc = GetCurrentProcess();
4255 memcounters.cb = sizeof(memcounters);
4256 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4257 return memcounters.PeakPagefileUsage;
/* No-op log callback; installed via av_log_set_callback() when running
 * with -d (daemon mode) to suppress console logging. Body elided here. */
4263 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4267 int main(int argc, char **argv)
/* Program entry point: set up logging/cleanup, register all codecs,
 * devices, filters and network support, parse the command line, run
 * transcode(), and exit through exit_program(). */
4272 register_exit(ffmpeg_cleanup);
4274 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4276 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4277 parse_loglevel(argc, argv, options);
/* -d as the first argument: daemon mode — silence logging entirely. */
4279 if(argc>1 && !strcmp(argv[1], "-d")){
4281 av_log_set_callback(log_callback_null);
4286 avcodec_register_all();
4288 avdevice_register_all();
4290 avfilter_register_all();
4292 avformat_network_init();
4294 show_banner(argc, argv, options);
4298 /* parse options and open all input/output files */
4299 ret = ffmpeg_parse_options(argc, argv);
4303 if (nb_output_files <= 0 && nb_input_files == 0) {
4305 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4309 /* file converter / grab */
4310 if (nb_output_files <= 0) {
4311 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4315 //     if (nb_input_files == 0) {
4316 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Benchmark timing brackets the whole transcode run. */
4320 current_time = ti = getutime();
4321 if (transcode() < 0)
4323 ti = getutime() - ti;
4325 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4327 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4328 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail when decode errors exceed the allowed fraction. */
4329 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 signals interruption by signal. */
4332 exit_program(received_nb_signals ? 255 : main_return_code);
4333 return main_return_code;