2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
135 static uint8_t *subtitle_out;
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* sub2video hack: (re)allocate ist->sub2video.frame as a blank, fully
 * transparent RGB32 canvas onto which subtitle rectangles are later drawn.
 * Canvas size comes from the decoder when known, else from sub2video.w/h.
 * NOTE(review): this excerpt is missing some original lines (the `ret`
 * declaration and the function's return/closing) — confirm against the
 * full source. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero-fill: all-zero RGB32 is transparent black */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted subtitle rectangle into the dst RGB32 canvas
 * (dst_linesize bytes per row, canvas w x h). Rejects non-bitmap
 * subtitles and rectangles that do not fit inside the canvas.
 * NOTE(review): declarations of src/src2/x/y and the early `return`s
 * after the warnings are missing from this excerpt. */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* bounds check: rectangle must lie entirely within the canvas */
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left pixel; 4 bytes per RGB32 pixel */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
/* expand each palette index to a 32-bit pixel via the rect's palette */
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Stamp the rendered sub2video canvas with `pts` (also recorded as
 * sub2video.last_pts) and push a reference to it into every filtergraph
 * input fed by this stream. KEEP_REF lets us re-send the same frame
 * later (heartbeat); PUSH requests immediate filtering. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render a new AVSubtitle (or a blank canvas when `sub` is NULL) into the
 * sub2video frame and push it downstream. Display start/end times (ms,
 * relative to sub->pts in AV_TIME_BASE) are rescaled to the stream time
 * base; end_pts marks when this rendition stops being valid.
 * NOTE(review): the NULL-`sub` branch setting end_pts and num_rects is
 * only partially visible in this excerpt. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* no subtitle: keep the previous end as this frame's pts */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Called when a frame is read from `ist`'s file: re-send the current
 * sub2video frame on every sub2video stream of the same file, so filters
 * (e.g. overlay) waiting on a subtitle input do not stall video frames.
 * NOTE(review): declarations of i/j/pts2/nb_reqs and some control lines
 * (continue/condition guards) are missing from this excerpt. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* current rendition expired (or never drawn): render a blank frame */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* only push if some filter actually failed a frame request */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: clear any still-active
 * subtitle rendition, then signal EOF to each connected buffersrc by
 * sending a NULL frame. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put the tty back into its saved
 * state. NOTE(review): the original guards these calls (restore_tty /
 * quiet checks); those lines are missing from this excerpt. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many we have seen.
 * After more than 3 signals, hard-exit; uses write(2) directly because
 * it must stay async-signal-safe (no stdio/av_log here). */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * same path as POSIX signals. For close/logoff/shutdown the process is
 * hard-killed on return, so we stall until the main thread has cleaned
 * up (ffmpeg_exited). NOTE(review): the switch header, returns, and the
 * sleep inside the wait loop are missing from this excerpt. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking. POSIX path uses
 * select() with a zero timeout; the Windows path distinguishes a console
 * from a pipe (GUI wrappers) via GetConsoleMode/PeekNamedPipe.
 * NOTE(review): this excerpt is heavily fragmented — fd_set/timeval
 * setup, the read() call, and all returns are missing. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered to run at program exit: free all filter
 * graphs, output files/streams, input files/streams, the vstats file and
 * the global stream arrays, then report why we are exiting.
 * NOTE(review): several guard lines (benchmark flag, vstats_file check,
 * bsfc loop header, etc.) are missing from this excerpt. */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free every filtergraph: its graph, input/output pads and description */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* close and free every output file (write trailer/close happens earlier) */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* free per-output-stream state: bitstream filters, frames, parser, ... */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
/* stop reader threads before tearing down their input files */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
/* fclose flushes; report failure since stats could be lost */
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort (fatal log) if any option is left unconsumed in m — i.e. the user
 * supplied an option no component recognized. NOTE(review): the
 * exit/terminate call after the log line is missing from this excerpt. */
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
604 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: print the user time elapsed since the previous
 * call, labelled by the printf-style fmt; then reset current_time.
 * NOTE(review): the va_list setup, NULL-fmt handling and the
 * current_time update are missing from this excerpt. */
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded/copied packet to the muxer for stream `ost`:
 * propagate extradata to the muxing context, enforce -vsync drop /
 * async, cap frame count, collect quality side data, apply bitstream
 * filters, sanitize non-monotonic DTS, then interleave-write.
 * NOTE(review): several lines (variable decls, some closing braces,
 * exit paths) are missing from this excerpt. */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* copy encoder extradata to the (deprecated) muxing codec context once */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop (video) / -async < 0 (audio): strip timestamps entirely */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
/* harvest encoder quality / picture type / error side data for stats */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: duration is dictated by the frame rate, not the packet */
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
/* timestamp sanitation for muxers that require monotonic DTS <= PTS */
698 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
699 if (pkt->dts != AV_NOPTS_VALUE &&
700 pkt->pts != AV_NOPTS_VALUE &&
701 pkt->dts > pkt->pts) {
702 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
704 ost->file_index, ost->st->index);
/* replace dts by the median of {pts, dts, last_mux_dts+1} (sum minus min minus max) */
706 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
707 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
708 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
711 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
712 pkt->dts != AV_NOPTS_VALUE &&
713 ost->last_mux_dts != AV_NOPTS_VALUE) {
714 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
715 if (pkt->dts < max) {
716 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
717 av_log(s, loglevel, "Non-monotonous DTS in output stream "
718 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
719 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
721 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
724 av_log(s, loglevel, "changing to %"PRId64". This may result "
725 "in incorrect timestamps in the output file.\n",
/* keep pts >= dts after clamping dts up to `max` */
727 if(pkt->pts >= pkt->dts)
728 pkt->pts = FFMAX(pkt->pts, max);
733 ost->last_mux_dts = pkt->dts;
735 ost->data_size += pkt->size;
736 ost->packets_written++;
738 pkt->stream_index = ost->index;
741 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
742 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
743 av_get_media_type_string(ost->enc_ctx->codec_type),
744 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
745 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
750 ret = av_interleaved_write_frame(s, pkt);
/* muxer failure: flag exit code and shut every output stream down */
752 print_error("av_interleaved_write_frame()", ret);
753 main_return_code = 1;
754 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
756 av_packet_unref(pkt);
/* Mark this output stream's encoder as finished and, when applicable,
 * shorten the containing file's recording time to the stream's current
 * end. NOTE(review): the condition guarding the recording-time update
 * (original `if (of->shortest)`-style check) is missing from this
 * excerpt — confirm against the full source. */
759 static void close_output_stream(OutputStream *ost)
761 OutputFile *of = output_files[ost->file_index];
763 ost->finished |= ENCODER_FINISHED;
765 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
766 of->recording_time = FFMIN(of->recording_time, end);
/* Return whether encoding may continue on this stream given -t: when the
 * stream's current position has reached the file's recording_time, close
 * the stream. NOTE(review): the `return 0;` / `return 1;` statements are
 * missing from this excerpt. */
770 static int check_recording_time(OutputStream *ost)
772 OutputFile *of = output_files[ost->file_index];
774 if (of->recording_time != INT64_MAX &&
775 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
776 AV_TIME_BASE_Q) >= 0) {
777 close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet(s) to write_frame.
 * Assigns frame->pts from sync_opts when absent (or with -async < 0) and
 * advances sync_opts by nb_samples. NOTE(review): declarations (pkt,
 * got_packet) and some early-return/exit lines are missing from this
 * excerpt. */
783 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
786 AVCodecContext *enc = ost->enc_ctx;
790 av_init_packet(&pkt);
/* honor -t: stop once the recording time is reached */
794 if (!check_recording_time(ost))
797 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
798 frame->pts = ost->sync_opts;
799 ost->sync_opts = frame->pts + frame->nb_samples;
800 ost->samples_encoded += frame->nb_samples;
801 ost->frames_encoded++;
803 av_assert0(pkt.size || !pkt.data);
804 update_benchmark(NULL);
806 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
807 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
808 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
809 enc->time_base.num, enc->time_base.den);
812 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
813 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
816 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* packet timestamps are in encoder time base; convert for the muxer */
819 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
822 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
823 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
824 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
825 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
828 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet draws, one clears), hence the `nb` loop. Timestamps
 * are shifted by the output file's start time so -ss/-t work.
 * NOTE(review): declarations (enc, pkt, pts), the `nb = 2`/`nb = 1`
 * assignments and several exit paths are missing from this excerpt. */
832 static void do_subtitle_out(AVFormatContext *s,
837 int subtitle_out_max_size = 1024 * 1024;
838 int subtitle_out_size, nb, i;
843 if (sub->pts == AV_NOPTS_VALUE) {
844 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB encode buffer (freed in cleanup) */
853 subtitle_out = av_malloc(subtitle_out_max_size);
855 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
860 /* Note: DVB subtitle need one packet to draw them and one other
861 packet to clear them */
862 /* XXX: signal it in the codec context ? */
863 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
868 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
870 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
871 pts -= output_files[ost->file_index]->start_time;
872 for (i = 0; i < nb; i++) {
873 unsigned save_num_rects = sub->num_rects;
875 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
876 if (!check_recording_time(ost))
880 // start_display_time is required to be 0
881 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
882 sub->end_display_time -= sub->start_display_time;
883 sub->start_display_time = 0;
887 ost->frames_encoded++;
889 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
890 subtitle_out_max_size, sub);
/* the encoder may have altered num_rects (DVB clear pass); restore it */
892 sub->num_rects = save_num_rects;
893 if (subtitle_out_size < 0) {
894 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
898 av_init_packet(&pkt);
899 pkt.data = subtitle_out;
900 pkt.size = subtitle_out_size;
901 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
902 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
903 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
904 /* XXX: the pts correction is handled here. Maybe handling
905 it in the codec would be better */
/* 90 kHz ticks: draw packet at start_display_time, clear at end */
907 pkt.pts += 90 * sub->start_display_time;
909 pkt.pts += 90 * sub->end_display_time;
912 write_frame(s, &pkt, ost);
/* Encode one filtered video frame (or flush with next_picture == NULL):
 * pick the -vsync mode, compute how many times to duplicate or whether
 * to drop based on the drift between input and output timelines, handle
 * forced keyframes, encode, and mux via write_frame(). Also keeps
 * ost->last_frame for duplication. NOTE(review): this excerpt is missing
 * many lines (declarations, duration init, delta clipping branches,
 * several closing braces) — treat the flow below as partial. */
916 static void do_video_out(AVFormatContext *s,
918 AVFrame *next_picture,
921 int ret, format_video_sync;
923 AVCodecContext *enc = ost->enc_ctx;
924 AVCodecContext *mux_enc = ost->st->codec;
925 int nb_frames, nb0_frames, i;
926 double delta, delta0;
929 InputStream *ist = NULL;
930 AVFilterContext *filter = ost->filter->filter;
932 if (ost->source_index >= 0)
933 ist = input_streams[ost->source_index];
/* derive the nominal frame duration (in encoder ticks) from the filter
 * output frame rate, clamped by the forced output rate when present */
935 if (filter->inputs[0]->frame_rate.num > 0 &&
936 filter->inputs[0]->frame_rate.den > 0)
937 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
939 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
940 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
942 if (!ost->filters_script &&
946 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
947 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path: predict duplication count from recent history */
952 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
953 ost->last_nb0_frames[1],
954 ost->last_nb0_frames[2]);
956 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
957 delta = delta0 + duration;
959 /* by default, we output a single frame */
960 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve VSYNC_AUTO to a concrete sync mode based on the muxer */
963 format_video_sync = video_sync_method;
964 if (format_video_sync == VSYNC_AUTO) {
965 if(!strcmp(s->oformat->name, "avi")) {
966 format_video_sync = VSYNC_VFR;
968 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
970 && format_video_sync == VSYNC_CFR
971 && input_files[ist->file_index]->ctx->nb_streams == 1
972 && input_files[ist->file_index]->input_ts_offset == 0) {
973 format_video_sync = VSYNC_VSCFR;
975 if (format_video_sync == VSYNC_CFR && copy_ts) {
976 format_video_sync = VSYNC_VSCFR;
979 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* negative drift: input frame is in the past relative to output */
983 format_video_sync != VSYNC_PASSTHROUGH &&
984 format_video_sync != VSYNC_DROP) {
986 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
988 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
989 sync_ipts = ost->sync_opts;
994 switch (format_video_sync) {
996 if (ost->frame_number == 0 && delta0 >= 0.5) {
997 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1000 ost->sync_opts = lrint(sync_ipts);
1003 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: drop when under threshold, duplicate when drift exceeds 1.1 */
1004 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1006 } else if (delta < -1.1)
1008 else if (delta > 1.1) {
1009 nb_frames = lrintf(delta);
1011 nb0_frames = lrintf(delta0 - 0.6);
1017 else if (delta > 0.6)
1018 ost->sync_opts = lrint(sync_ipts);
1021 case VSYNC_PASSTHROUGH:
1022 ost->sync_opts = lrint(sync_ipts);
/* never exceed -frames: and shift the dup-history window */
1029 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1030 nb0_frames = FFMIN(nb0_frames, nb_frames);
1032 memmove(ost->last_nb0_frames + 1,
1033 ost->last_nb0_frames,
1034 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1035 ost->last_nb0_frames[0] = nb0_frames;
1037 if (nb0_frames == 0 && ost->last_dropped) {
1039 av_log(NULL, AV_LOG_VERBOSE,
1040 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1041 ost->frame_number, ost->st->index, ost->last_frame->pts);
1043 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1044 if (nb_frames > dts_error_threshold * 30) {
1045 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1049 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1050 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1052 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1054 /* duplicates frame if needed */
1055 for (i = 0; i < nb_frames; i++) {
1056 AVFrame *in_picture;
1057 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the previous frame */
1061 if (i < nb0_frames && ost->last_frame) {
1062 in_picture = ost->last_frame;
1064 in_picture = next_picture;
1069 in_picture->pts = ost->sync_opts;
1072 if (!check_recording_time(ost))
1074 if (ost->frame_number >= ost->max_frames)
1078 #if FF_API_LAVF_FMT_RAWPICTURE
1079 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1080 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1081 /* raw pictures are written as AVPicture structure to
1082 avoid any copies. We support temporarily the older
1084 if (in_picture->interlaced_frame)
1085 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1087 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1088 pkt.data = (uint8_t *)in_picture;
1089 pkt.size = sizeof(AVPicture);
1090 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1091 pkt.flags |= AV_PKT_FLAG_KEY;
1093 write_frame(s, &pkt, ost);
1097 int got_packet, forced_keyframe = 0;
1100 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1101 ost->top_field_first >= 0)
1102 in_picture->top_field_first = !!ost->top_field_first;
1104 if (in_picture->interlaced_frame) {
1105 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1106 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1108 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1110 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1112 in_picture->quality = enc->global_quality;
1113 in_picture->pict_type = 0;
/* forced keyframes: explicit pts list, expression, or "source" flag */
1115 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1116 in_picture->pts * av_q2d(enc->time_base) : NAN;
1117 if (ost->forced_kf_index < ost->forced_kf_count &&
1118 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1119 ost->forced_kf_index++;
1120 forced_keyframe = 1;
1121 } else if (ost->forced_keyframes_pexpr) {
1123 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1124 res = av_expr_eval(ost->forced_keyframes_pexpr,
1125 ost->forced_keyframes_expr_const_values, NULL);
1126 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1127 ost->forced_keyframes_expr_const_values[FKF_N],
1128 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1130 ost->forced_keyframes_expr_const_values[FKF_T],
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1134 forced_keyframe = 1;
1135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1136 ost->forced_keyframes_expr_const_values[FKF_N];
1137 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1138 ost->forced_keyframes_expr_const_values[FKF_T];
1139 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1142 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1143 } else if ( ost->forced_keyframes
1144 && !strncmp(ost->forced_keyframes, "source", 6)
1145 && in_picture->key_frame==1) {
1146 forced_keyframe = 1;
1149 if (forced_keyframe) {
1150 in_picture->pict_type = AV_PICTURE_TYPE_I;
1151 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1154 update_benchmark(NULL);
1156 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1157 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1158 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1159 enc->time_base.num, enc->time_base.den);
1162 ost->frames_encoded++;
1164 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1165 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1167 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1173 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1174 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1175 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1176 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1179 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1180 pkt.pts = ost->sync_opts;
1182 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1185 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1188 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1191 frame_size = pkt.size;
1192 write_frame(s, &pkt, ost);
1194 /* if two pass, output log */
1195 if (ost->logfile && enc->stats_out) {
1196 fprintf(ost->logfile, "%s", enc->stats_out);
1202 * For video, number of frames in == number of packets out.
1203 * But there may be reordering, so we can't throw away frames on encoder
1204 * flush, we need to limit them here, before they go into encoder.
1206 ost->frame_number++;
1208 if (vstats_filename && frame_size)
1209 do_video_stats(ost, frame_size);
/* keep a ref to the last input frame so it can be duplicated later */
1212 if (!ost->last_frame)
1213 ost->last_frame = av_frame_alloc();
1214 av_frame_unref(ost->last_frame);
1215 if (next_picture && ost->last_frame)
1216 av_frame_ref(ost->last_frame, next_picture);
1218 av_frame_free(&ost->last_frame);
1221 static double psnr(double d)
1223 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (-vstats): frame number,
 * quantizer, optional PSNR, frame size, time, instantaneous and average
 * bitrate, and picture type. Opens the vstats file on first call.
 * NOTE(review): declarations (frame_number), error handling for fopen,
 * and the `enc = ost->enc_ctx` assignment are missing from this
 * excerpt. */
1226 static void do_video_stats(OutputStream *ost, int frame_size)
1228 AVCodecContext *enc;
1230 double ti1, bitrate, avg_bitrate;
1232 /* this is executed just the first time do_video_stats is called */
1234 vstats_file = fopen(vstats_filename, "w");
1242 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1243 frame_number = ost->st->nb_frames;
1244 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1245 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to compute error (FLAG_PSNR) */
1247 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1248 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1250 fprintf(vstats_file,"f_size= %6d ", frame_size);
1251 /* compute pts value */
1252 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1256 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1257 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1258 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1259 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1260 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this output stream fully finished (encoder and muxer), and
 * likewise every stream of the same output file. NOTE(review): the
 * condition guarding the loop over sibling streams (likely an
 * of->shortest check) is missing from this excerpt. */
1264 static void finish_output_stream(OutputStream *ost)
1266 OutputFile *of = output_files[ost->file_index];
1269 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1272 for (i = 0; i < of->ctx->nb_streams; i++)
1273 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity beyond what was requested.
 *
 * @param flush  if set, also drain EOF from the sinks (video encoders get a
 *               NULL frame so they can flush)
 * @return 0 for success, <0 for severe errors
 */
1283 static int reap_filters(int flush)
1285 AVFrame *filtered_frame = NULL;
1288 /* Reap all buffers present in the buffer sinks */
1289 for (i = 0; i < nb_output_streams; i++) {
1290 OutputStream *ost = output_streams[i];
1291 OutputFile *of = output_files[ost->file_index];
1292 AVFilterContext *filter;
1293 AVCodecContext *enc = ost->enc_ctx;
1298 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive sink output */
1300 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1301 return AVERROR(ENOMEM);
1303 filtered_frame = ost->filtered_frame;
1306 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking fetch: NO_REQUEST avoids triggering more filtering work */
1307 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1308 AV_BUFFERSINK_FLAG_NO_REQUEST);
1310 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1311 av_log(NULL, AV_LOG_WARNING,
1312 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1313 } else if (flush && ret == AVERROR_EOF) {
/* on flush, push a NULL frame down the video path so it can drain */
1314 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1315 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1319 if (ost->finished) {
1320 av_frame_unref(filtered_frame);
1323 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1324 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1325 AVRational tb = enc->time_base;
/* widen the time base so float_pts retains extra fractional precision */
1326 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1328 tb.den <<= extra_bits;
1330 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1331 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1332 float_pts /= 1 << extra_bits;
1333 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1334 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts for the encoder, expressed in the encoder time base */
1336 filtered_frame->pts =
1337 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1338 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1340 //if (ost->source_index >= 0)
1341 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1343 switch (filter->inputs[0]->type) {
1344 case AVMEDIA_TYPE_VIDEO:
/* keep the SAR from the filter output unless the user forced an aspect */
1345 if (!ost->frame_aspect_ratio.num)
1346 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1349 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1350 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1352 enc->time_base.num, enc->time_base.den);
1355 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1357 case AVMEDIA_TYPE_AUDIO:
/* a normalized filter graph always delivers the encoder's channel count */
1358 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1359 enc->channels != av_frame_get_channels(filtered_frame)) {
1360 av_log(NULL, AV_LOG_ERROR,
1361 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1364 do_audio_out(of->ctx, ost, filtered_frame);
1367 // TODO support subtitle filters
1371 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte counts and muxing
 * overhead at INFO level, then (at VERBOSE) per-stream demux/decode and
 * encode/mux statistics for every input and output file.
 * @param total_size  total bytes written to the first output (may be <=0
 *                    when unknown, e.g. non-seekable output) */
1378 static void print_final_stats(int64_t total_size)
1380 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381 uint64_t subtitle_size = 0;
1382 uint64_t data_size = 0;
/* percent stays negative when the overhead cannot be computed */
1383 float percent = -1.0;
1387 for (i = 0; i < nb_output_streams; i++) {
1388 OutputStream *ost = output_streams[i];
1389 switch (ost->enc_ctx->codec_type) {
1390 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393 default: other_size += ost->data_size; break;
1395 extra_size += ost->enc_ctx->extradata_size;
1396 data_size += ost->data_size;
/* NOTE(review): CODEC_FLAG_PASS2 is the deprecated spelling of
 * AV_CODEC_FLAG_PASS2 (same value); using the AV_-prefixed name would be
 * consistent with AV_CODEC_FLAG_PASS1 on the same line -- confirm. */
1397 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1398 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw coded payload */
1402 if (data_size && total_size>0 && total_size >= data_size)
1403 percent = 100.0 * (total_size - data_size) / data_size;
1405 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406 video_size / 1024.0,
1407 audio_size / 1024.0,
1408 subtitle_size / 1024.0,
1409 other_size / 1024.0,
1410 extra_size / 1024.0);
1412 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1414 av_log(NULL, AV_LOG_INFO, "unknown");
1415 av_log(NULL, AV_LOG_INFO, "\n");
1417 /* print verbose per-stream stats */
1418 for (i = 0; i < nb_input_files; i++) {
1419 InputFile *f = input_files[i];
/* note: these locals shadow the total_size parameter inside this loop */
1420 uint64_t total_packets = 0, total_size = 0;
1422 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423 i, f->ctx->filename);
1425 for (j = 0; j < f->nb_streams; j++) {
1426 InputStream *ist = input_streams[f->ist_index + j];
1427 enum AVMediaType type = ist->dec_ctx->codec_type;
1429 total_size += ist->data_size;
1430 total_packets += ist->nb_packets;
1432 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433 i, j, media_type_string(type));
1434 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435 ist->nb_packets, ist->data_size);
1437 if (ist->decoding_needed) {
1438 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439 ist->frames_decoded);
1440 if (type == AVMEDIA_TYPE_AUDIO)
1441 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442 av_log(NULL, AV_LOG_VERBOSE, "; ");
1445 av_log(NULL, AV_LOG_VERBOSE, "\n");
1448 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449 total_packets, total_size);
1452 for (i = 0; i < nb_output_files; i++) {
1453 OutputFile *of = output_files[i];
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457 i, of->ctx->filename);
1459 for (j = 0; j < of->ctx->nb_streams; j++) {
1460 OutputStream *ost = output_streams[of->ost_index + j];
1461 enum AVMediaType type = ost->enc_ctx->codec_type;
1463 total_size += ost->data_size;
1464 total_packets += ost->packets_written;
1466 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 if (ost->encoding_needed) {
1469 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470 ost->frames_encoded);
1471 if (type == AVMEDIA_TYPE_AUDIO)
1472 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477 ost->packets_written, ost->data_size);
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483 total_packets, total_size);
/* warn when literally nothing was encoded into any output */
1485 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1488 av_log(NULL, AV_LOG_WARNING, "\n");
1490 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Print the running status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when -progress is active, machine-readable key=value pairs to
 * progress_avio.  Called periodically from the main loop and once more with
 * is_last_report set, in which case it also triggers print_final_stats().
 * NOTE: the static locals (last_time, qp_histogram) make this function
 * single-instance / not thread-safe. */
1495 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1498 AVBPrint buf_script;
1500 AVFormatContext *oc;
1502 AVCodecContext *enc;
1503 int frame_number, vid, i;
1506 int64_t pts = INT64_MIN + 1;
1507 static int64_t last_time = -1;
1508 static int qp_histogram[52];
1509 int hours, mins, secs, us;
1513 if (!print_stats && !is_last_report && !progress_avio)
/* rate-limit intermediate reports to one per 500 ms of wall time */
1516 if (!is_last_report) {
1517 if (last_time == -1) {
1518 last_time = cur_time;
1521 if ((cur_time - last_time) < 500000)
1523 last_time = cur_time;
1526 t = (cur_time-timer_start) / 1000000.0;
1529 oc = output_files[0]->ctx;
1531 total_size = avio_size(oc->pb);
1532 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1533 total_size = avio_tell(oc->pb);
1537 av_bprint_init(&buf_script, 0, 1);
1538 for (i = 0; i < nb_output_streams; i++) {
1540 ost = output_streams[i];
/* quality is in lambda units; convert to a QP-like value for display */
1542 if (!ost->stream_copy)
1543 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream was already reported; only the
 * first video stream drives the frame/fps columns */
1545 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1547 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1548 ost->file_index, ost->index, q);
1550 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553 frame_number = ost->frame_number;
1554 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" selects 1 decimal place for small rates, 0 otherwise */
1555 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1556 frame_number, fps < 9.95, fps, q);
1557 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1558 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1559 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560 ost->file_index, ost->index, q);
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: accumulate and print a log-scaled per-QP histogram */
1566 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1568 for (j = 0; j < 32; j++)
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1572 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1574 double error, error_sum = 0;
1575 double scale, scale_sum = 0;
1577 char type[3] = { 'Y','U','V' };
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1579 for (j = 0; j < 3; j++) {
/* final report: cumulative error over all frames; otherwise last frame */
1580 if (is_last_report) {
1581 error = enc->error[j];
1582 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1584 error = ost->error[j];
1585 scale = enc->width * enc->height * 255.0 * 255.0;
1591 p = psnr(error / scale);
1592 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* "type[j] | 32" lowercases the plane letter for the script key */
1593 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1594 ost->file_index, ost->index, type[j] | 32, p);
1596 p = psnr(error_sum / scale_sum);
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1598 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1599 ost->file_index, ost->index, p);
1603 /* compute min output value */
1604 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1605 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1606 ost->st->time_base, AV_TIME_BASE_Q));
1608 nb_frames_drop += ost->last_dropped;
/* split the largest end pts into h:m:s.us for display */
1611 secs = FFABS(pts) / AV_TIME_BASE;
1612 us = FFABS(pts) % AV_TIME_BASE;
1618 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
/* speed = output seconds produced per wall-clock second */
1619 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1621 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1623 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624 "size=%8.0fkB time=", total_size / 1024.0);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629 (100 * us) / AV_TIME_BASE);
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633 av_bprintf(&buf_script, "bitrate=N/A\n");
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1639 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643 hours, mins, secs, us);
1645 if (nb_frames_dup || nb_frames_drop)
1646 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647 nb_frames_dup, nb_frames_drop);
1648 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1653 av_bprintf(&buf_script, "speed=N/A\n");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1656 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' for the last */
1659 if (print_stats || is_last_report) {
1660 const char end = is_last_report ? '\n' : '\r';
1661 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662 fprintf(stderr, "%s %c", buf, end);
1664 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1669 if (progress_avio) {
1670 av_bprintf(&buf_script, "progress=%s\n",
1671 is_last_report ? "end" : "continue");
1672 avio_write(progress_avio, buf_script.str,
1673 FFMIN(buf_script.len, buf_script.size - 1));
1674 avio_flush(progress_avio);
1675 av_bprint_finalize(&buf_script, NULL);
1676 if (is_last_report) {
1677 if ((ret = avio_closep(&progress_avio)) < 0)
1678 av_log(NULL, AV_LOG_ERROR,
1679 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1684 print_final_stats(total_size);
/* Drain every still-open encoder at end of stream: feed NULL frames via the
 * (deprecated) avcodec_encode_audio2/video2 API so buffered packets are
 * emitted and written to the muxer. */
1687 static void flush_encoders(void)
1691 for (i = 0; i < nb_output_streams; i++) {
1692 OutputStream *ost = output_streams[i];
1693 AVCodecContext *enc = ost->enc_ctx;
1694 AVFormatContext *os = output_files[ost->file_index]->ctx;
1695 int stop_encoding = 0;
/* stream-copied outputs have nothing buffered in an encoder */
1697 if (!ost->encoding_needed)
/* raw PCM-style audio encoders (frame_size <= 1) buffer nothing either */
1700 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1702 #if FF_API_LAVF_FMT_RAWPICTURE
1703 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* select the per-media-type legacy encode entry point */
1708 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1711 switch (enc->codec_type) {
1712 case AVMEDIA_TYPE_AUDIO:
1713 encode = avcodec_encode_audio2;
1716 case AVMEDIA_TYPE_VIDEO:
1717 encode = avcodec_encode_video2;
1728 av_init_packet(&pkt);
1732 update_benchmark(NULL);
/* frame == NULL asks the encoder to flush its internal queue */
1733 ret = encode(enc, &pkt, NULL, &got_packet);
1734 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1736 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* keep feeding the 2-pass log file while flushing */
1741 if (ost->logfile && enc->stats_out) {
1742 fprintf(ost->logfile, "%s", enc->stats_out);
1748 if (ost->finished & MUXER_FINISHED) {
1749 av_packet_unref(&pkt);
1752 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* save size before write_frame(), which may consume/modify the packet */
1753 pkt_size = pkt.size;
1754 write_frame(os, &pkt, ost);
1755 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1756 do_video_stats(ost, pkt_size);
/*
 * Check whether a packet from ist should be written into ost at this time.
 * Returns nonzero when the stream mapping matches and the output file's
 * start-time constraint allows it (return statements elided in this view).
 */
1769 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1771 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, for comparison with the
 * output stream's source_index mapping */
1772 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1774 if (ost->source_index != ist_index)
/* drop packets that precede the requested output start time (-ss) */
1780 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale timestamps into the output time base, honor start/recording-time
 * limits, optionally run the legacy parser-based bitstream fixups, then
 * hand the packet to write_frame(). */
1786 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1788 OutputFile *of = output_files[ost->file_index];
1789 InputFile *f = input_files [ist->file_index];
1790 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1791 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 av_init_packet(&opkt);
/* by default, don't start a copied stream on a non-keyframe */
1797 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1798 !ost->copy_initial_nonkeyframes)
/* drop packets that lie before the effective output start point */
1801 if (!ost->frame_number && !ost->copy_prior_start) {
1802 int64_t comp_start = start_time;
1803 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1804 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1805 if (pkt->pts == AV_NOPTS_VALUE ?
1806 ist->pts < comp_start :
1807 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output recording time (-t) is exceeded */
1811 if (of->recording_time != INT64_MAX &&
1812 ist->pts >= of->recording_time + start_time) {
1813 close_output_stream(ost);
/* same, but against the per-input-file recording limit */
1817 if (f->recording_time != INT64_MAX) {
1818 start_time = f->ctx->start_time;
1819 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1820 start_time += f->start_time;
1821 if (ist->pts >= f->recording_time + start_time) {
1822 close_output_stream(ost);
1827 /* force the input stream PTS */
1828 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale pts/dts into the output stream time base, offset by start time */
1831 if (pkt->pts != AV_NOPTS_VALUE)
1832 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1834 opkt.pts = AV_NOPTS_VALUE;
1836 if (pkt->dts == AV_NOPTS_VALUE)
1837 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1839 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1840 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps via av_rescale_delta so rounding
 * errors don't accumulate across packets.
 * NOTE(review): ost->st->codec is the deprecated AVStream codec context. */
1842 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1843 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1845 duration = ist->dec_ctx->frame_size;
1846 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1847 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1848 ost->st->time_base) - ost_tb_start_time;
1851 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1852 opkt.flags = pkt->flags;
1853 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1854 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1855 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1856 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1857 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
/* av_parser_change may allocate a new buffer for the modified payload */
1859 int ret = av_parser_change(ost->parser, ost->st->codec,
1860 &opkt.data, &opkt.size,
1861 pkt->data, pkt->size,
1862 pkt->flags & AV_PKT_FLAG_KEY);
1864 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap the parser-allocated data in a refcounted buffer we own */
1869 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1874 opkt.data = pkt->data;
1875 opkt.size = pkt->size;
1877 av_copy_packet_side_data(&opkt, pkt);
1879 #if FF_API_LAVF_FMT_RAWPICTURE
1880 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1881 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1882 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1883 /* store AVPicture in AVPacket, as expected by the output format */
1884 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1886 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1890 opkt.data = (uint8_t *)&pict;
1891 opkt.size = sizeof(AVPicture);
1892 opkt.flags |= AV_PKT_FLAG_KEY;
1896 write_frame(of->ctx, &opkt, ost);
/* If the decoder did not report a channel layout, fill in the default
 * layout for its channel count (only up to -guess_layout_max channels) and
 * log a warning with the guessed name.
 * Return value: presumably nonzero on success, 0 on failure -- the return
 * statements are elided in this view; confirm against the full source. */
1899 int guess_input_channel_layout(InputStream *ist)
1901 AVCodecContext *dec = ist->dec_ctx;
1903 if (!dec->channel_layout) {
1904 char layout_name[256];
/* refuse to guess for channel counts above the user-set limit */
1906 if (dec->channels > ist->guess_layout_max)
1908 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1909 if (!dec->channel_layout)
1911 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1912 dec->channels, dec->channel_layout);
1913 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1914 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Bookkeeping after every decode call: count successes/failures in
 * decode_error_stat[], and when -xerror is set, abort on decode errors or
 * on corrupt decoded frames.
 * @param ist  may be NULL (e.g. for subtitles) -- then the corrupt-frame
 *             check is skipped */
1919 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* index 0 counts successful decodes, index 1 counts failed ones */
1921 if (*got_output || ret<0)
1922 decode_error_stat[ret<0] ++;
1924 if (ret < 0 && exit_on_error)
1927 if (exit_on_error && *got_output && ist) {
1928 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1929 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet: run the (deprecated) avcodec_decode_audio4 API,
 * reconfigure the filtergraphs when the sample format/rate/channel layout
 * changes mid-stream, pick the best available timestamp for the decoded
 * frame, and push it into every filter this input stream feeds.
 * @return <0 on error, otherwise the decoder's return value */
1935 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1937 AVFrame *decoded_frame, *f;
1938 AVCodecContext *avctx = ist->dec_ctx;
1939 int i, ret, err = 0, resample_changed;
1940 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames on first use */
1942 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1943 return AVERROR(ENOMEM);
1944 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1945 return AVERROR(ENOMEM);
1946 decoded_frame = ist->decoded_frame;
1948 update_benchmark(NULL);
1949 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1950 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1952 if (ret >= 0 && avctx->sample_rate <= 0) {
1953 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1954 ret = AVERROR_INVALIDDATA;
1957 check_decode_result(ist, got_output, ret);
1959 if (!*got_output || ret < 0)
1962 ist->samples_decoded += decoded_frame->nb_samples;
1963 ist->frames_decoded++;
1966 /* increment next_dts to use for the case where the input stream does not
1967 have timestamps or there are multiple frames in the packet */
1968 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1970 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream parameter changes that require filter reconfiguration */
1974 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1975 ist->resample_channels != avctx->channels ||
1976 ist->resample_channel_layout != decoded_frame->channel_layout ||
1977 ist->resample_sample_rate != decoded_frame->sample_rate;
1978 if (resample_changed) {
1979 char layout1[64], layout2[64];
1981 if (!guess_input_channel_layout(ist)) {
1982 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1983 "layout for Input Stream #%d.%d\n", ist->file_index,
1987 decoded_frame->channel_layout = avctx->channel_layout;
1989 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1990 ist->resample_channel_layout);
1991 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1992 decoded_frame->channel_layout);
1994 av_log(NULL, AV_LOG_INFO,
1995 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1996 ist->file_index, ist->st->index,
1997 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1998 ist->resample_channels, layout1,
1999 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2000 avctx->channels, layout2);
/* remember the new parameters as the current baseline */
2002 ist->resample_sample_fmt = decoded_frame->format;
2003 ist->resample_sample_rate = decoded_frame->sample_rate;
2004 ist->resample_channel_layout = decoded_frame->channel_layout;
2005 ist->resample_channels = avctx->channels;
/* rebuild every filtergraph fed by this stream with the new parameters */
2007 for (i = 0; i < nb_filtergraphs; i++)
2008 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2009 FilterGraph *fg = filtergraphs[i];
2010 if (configure_filtergraph(fg) < 0) {
2011 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2017 /* if the decoder provides a pts, use it instead of the last packet pts.
2018 the decoder could be delaying output by a packet or more. */
2019 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2020 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2021 decoded_frame_tb = avctx->time_base;
2022 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2023 decoded_frame->pts = decoded_frame->pkt_pts;
2024 decoded_frame_tb = ist->st->time_base;
2025 } else if (pkt->pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = pkt->pts;
2027 decoded_frame_tb = ist->st->time_base;
2029 decoded_frame->pts = ist->dts;
2030 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so multi-frame packets don't reuse it */
2032 pkt->pts = AV_NOPTS_VALUE;
/* convert pts to a 1/sample_rate base, accumulating sub-sample error in
 * filter_in_rescale_delta_last to stay sample-accurate over time */
2033 if (decoded_frame->pts != AV_NOPTS_VALUE)
2034 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2035 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2036 (AVRational){1, avctx->sample_rate});
2037 ist->nb_samples = decoded_frame->nb_samples;
/* feed all filters: use a ref for all but the last so the frame survives */
2038 for (i = 0; i < ist->nb_filters; i++) {
2039 if (i < ist->nb_filters - 1) {
2040 f = ist->filter_frame;
2041 err = av_frame_ref(f, decoded_frame);
2046 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2047 AV_BUFFERSRC_FLAG_PUSH);
2048 if (err == AVERROR_EOF)
2049 err = 0; /* ignore */
2053 decoded_frame->pts = AV_NOPTS_VALUE;
2055 av_frame_unref(ist->filter_frame);
2056 av_frame_unref(decoded_frame);
2057 return err < 0 ? err : ret;
/* Decode one video packet: run the (deprecated) avcodec_decode_video2 API,
 * propagate stream-level overrides (top_field_first, SAR), retrieve hwaccel
 * frames to system memory, select the best-effort timestamp, reconfigure
 * filters on mid-stream resolution/format changes, and push the frame into
 * every filter this input stream feeds.
 * @return <0 on error, otherwise the decoder's return value */
2060 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2062 AVFrame *decoded_frame, *f;
2063 int i, ret = 0, err = 0, resample_changed;
2064 int64_t best_effort_timestamp;
2065 AVRational *frame_sample_aspect;
/* lazily allocate the reusable decode/filter frames on first use */
2067 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 decoded_frame = ist->decoded_frame;
/* overwrite the packet dts with our tracked input dts before decoding */
2072 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2074 update_benchmark(NULL);
2075 ret = avcodec_decode_video2(ist->dec_ctx,
2076 decoded_frame, got_output, pkt);
2077 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2079 // The following line may be required in some cases where there is no parser
2080 // or the parser does not has_b_frames correctly
2081 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2082 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2083 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2085 av_log(ist->dec_ctx, AV_LOG_WARNING,
2086 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2087 "If you want to help, upload a sample "
2088 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2089 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2090 ist->dec_ctx->has_b_frames,
2091 ist->st->codec->has_b_frames);
2094 check_decode_result(ist, got_output, ret);
/* purely diagnostic: warn when decoder context and frame disagree */
2096 if (*got_output && ret >= 0) {
2097 if (ist->dec_ctx->width != decoded_frame->width ||
2098 ist->dec_ctx->height != decoded_frame->height ||
2099 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2100 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2101 decoded_frame->width,
2102 decoded_frame->height,
2103 decoded_frame->format,
2104 ist->dec_ctx->width,
2105 ist->dec_ctx->height,
2106 ist->dec_ctx->pix_fmt);
2110 if (!*got_output || ret < 0)
/* honor an explicit -top user override */
2113 if(ist->top_field_first>=0)
2114 decoded_frame->top_field_first = ist->top_field_first;
2116 ist->frames_decoded++;
/* download hardware-decoded surfaces into regular memory frames */
2118 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2119 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2123 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2125 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2126 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* note the embedded assignment: decoded_frame->pts is set here too */
2127 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2129 if (ts != AV_NOPTS_VALUE)
2130 ist->next_pts = ist->pts = ts;
2134 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2135 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2136 ist->st->index, av_ts2str(decoded_frame->pts),
2137 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2138 best_effort_timestamp,
2139 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2140 decoded_frame->key_frame, decoded_frame->pict_type,
2141 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the codec-level one */
2146 if (ist->st->sample_aspect_ratio.num)
2147 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/pixel-format changes needing reconfiguration */
2149 resample_changed = ist->resample_width != decoded_frame->width ||
2150 ist->resample_height != decoded_frame->height ||
2151 ist->resample_pix_fmt != decoded_frame->format;
2152 if (resample_changed) {
2153 av_log(NULL, AV_LOG_INFO,
2154 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2155 ist->file_index, ist->st->index,
2156 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2157 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2159 ist->resample_width = decoded_frame->width;
2160 ist->resample_height = decoded_frame->height;
2161 ist->resample_pix_fmt = decoded_frame->format;
2163 for (i = 0; i < nb_filtergraphs; i++) {
2164 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2165 configure_filtergraph(filtergraphs[i]) < 0) {
2166 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* locate the frame's SAR field via the AVOption table (legacy access) */
2172 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* feed all filters: use a ref for all but the last so the frame survives */
2173 for (i = 0; i < ist->nb_filters; i++) {
2174 if (!frame_sample_aspect->num)
2175 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2177 if (i < ist->nb_filters - 1) {
2178 f = ist->filter_frame;
2179 err = av_frame_ref(f, decoded_frame);
2184 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2185 if (ret == AVERROR_EOF) {
2186 ret = 0; /* ignore */
2187 } else if (ret < 0) {
2188 av_log(NULL, AV_LOG_FATAL,
2189 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2195 av_frame_unref(ist->filter_frame);
2196 av_frame_unref(decoded_frame);
2197 return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every subtitle encoder
 * mapped to this input stream; also keeps the sub2video rendering path
 * up to date.  With -fix_sub_duration, output of the previous subtitle is
 * delayed one event so its display time can be clipped to the next pts. */
2200 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2202 AVSubtitle subtitle;
2203 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2204 &subtitle, got_output, pkt);
/* ist passed as NULL: skip the corrupt-decoded-frame check for subtitles */
2206 check_decode_result(NULL, got_output, ret);
2208 if (ret < 0 || !*got_output) {
2210 sub2video_flush(ist);
2214 if (ist->fix_sub_duration) {
2216 if (ist->prev_sub.got_output) {
/* end_display_time is in ms, hence the rescale to 1000/AV_TIME_BASE */
2217 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2218 1000, AV_TIME_BASE);
2219 if (end < ist->prev_sub.subtitle.end_display_time) {
2220 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2221 "Subtitle duration reduced from %d to %d%s\n",
2222 ist->prev_sub.subtitle.end_display_time, end,
2223 end <= 0 ? ", dropping it" : "");
2224 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous event: emit the previous, buffer the new */
2227 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2228 FFSWAP(int, ret, ist->prev_sub.ret);
2229 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2237 sub2video_update(ist, &subtitle);
2239 if (!subtitle.num_rects)
2242 ist->frames_decoded++;
/* forward the decoded subtitle to every matching subtitle encoder */
2244 for (i = 0; i < nb_output_streams; i++) {
2245 OutputStream *ost = output_streams[i];
2247 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2248 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2251 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2255 avsubtitle_free(&subtitle);
/* Signal EOF (a NULL frame) to every buffer source fed by this input
 * stream so the downstream filtergraphs can drain and terminate. */
2259 static int send_filter_eof(InputStream *ist)
2262 for (i = 0; i < ist->nb_filters; i++) {
2263 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2271 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2277 if (!ist->saw_first_ts) {
2278 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2280 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2281 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2282 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2284 ist->saw_first_ts = 1;
2287 if (ist->next_dts == AV_NOPTS_VALUE)
2288 ist->next_dts = ist->dts;
2289 if (ist->next_pts == AV_NOPTS_VALUE)
2290 ist->next_pts = ist->pts;
2294 av_init_packet(&avpkt);
2302 if (pkt->dts != AV_NOPTS_VALUE) {
2303 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2304 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2305 ist->next_pts = ist->pts = ist->dts;
2308 // while we have more to decode or while the decoder did output something on EOF
2309 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2313 ist->pts = ist->next_pts;
2314 ist->dts = ist->next_dts;
2316 if (avpkt.size && avpkt.size != pkt->size &&
2317 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2318 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2319 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2320 ist->showed_multi_packet_warning = 1;
2323 switch (ist->dec_ctx->codec_type) {
2324 case AVMEDIA_TYPE_AUDIO:
2325 ret = decode_audio (ist, &avpkt, &got_output);
2327 case AVMEDIA_TYPE_VIDEO:
2328 ret = decode_video (ist, &avpkt, &got_output);
2329 if (avpkt.duration) {
2330 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2331 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2332 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2333 duration = ((int64_t)AV_TIME_BASE *
2334 ist->dec_ctx->framerate.den * ticks) /
2335 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2339 if(ist->dts != AV_NOPTS_VALUE && duration) {
2340 ist->next_dts += duration;
2342 ist->next_dts = AV_NOPTS_VALUE;
2345 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2347 case AVMEDIA_TYPE_SUBTITLE:
2348 ret = transcode_subtitles(ist, &avpkt, &got_output);
2355 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2356 ist->file_index, ist->st->index, av_err2str(ret));
2363 avpkt.pts= AV_NOPTS_VALUE;
2365 // touch data and size only if not EOF
2367 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2375 if (got_output && !pkt)
2379 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2380 /* except when looping we need to flush but not to send an EOF */
2381 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2382 int ret = send_filter_eof(ist);
2384 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2389 /* handle stream copy */
2390 if (!ist->decoding_needed) {
2391 ist->dts = ist->next_dts;
2392 switch (ist->dec_ctx->codec_type) {
2393 case AVMEDIA_TYPE_AUDIO:
2394 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2395 ist->dec_ctx->sample_rate;
2397 case AVMEDIA_TYPE_VIDEO:
2398 if (ist->framerate.num) {
2399 // TODO: Remove work-around for c99-to-c89 issue 7
2400 AVRational time_base_q = AV_TIME_BASE_Q;
2401 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2402 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2403 } else if (pkt->duration) {
2404 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2405 } else if(ist->dec_ctx->framerate.num != 0) {
2406 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2407 ist->next_dts += ((int64_t)AV_TIME_BASE *
2408 ist->dec_ctx->framerate.den * ticks) /
2409 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2413 ist->pts = ist->dts;
2414 ist->next_pts = ist->next_dts;
2416 for (i = 0; pkt && i < nb_output_streams; i++) {
2417 OutputStream *ost = output_streams[i];
2419 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2422 do_streamcopy(ist, ost, pkt);
/*
 * Build an SDP description covering every RTP output muxer and either print
 * it to stdout (no -sdp_file given) or write it to the file named by the
 * global sdp_filename, which is freed afterwards.
 * NOTE(review): this excerpt has lost several lines (the avc allocation
 * check, loop/branch closing braces, error cleanup); tokens kept as-is.
 */
static void print_sdp(void)
{
    AVIOContext *sdp_pb;
    AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));

    /* collect only the RTP muxer contexts; j counts how many were found */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        /* no -sdp_file option: dump the SDP to stdout */
        printf("SDP:\n%s\n", sdp);

    /* -sdp_file given: write the SDP to that file instead */
    if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
    avio_printf(sdp_pb, "SDP:\n%s", sdp);
    avio_closep(&sdp_pb);
    av_freep(&sdp_filename); /* one-shot: filename is consumed here */
2467 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2470 for (i = 0; hwaccels[i].name; i++)
2471 if (hwaccels[i].pix_fmt == pix_fmt)
2472 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format whose hwaccel can actually be
 * initialized (respecting the user-selected / already-active hwaccel).
 * On a fatal mismatch for an explicitly requested hwaccel it returns
 * AV_PIX_FMT_NONE.
 * NOTE(review): several lines are missing from this excerpt (the `continue`
 * statements, the `if (!hwaccel || ...` condition head, the init-failure
 * branch head, the final software-format return); tokens kept as-is.
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;

    /* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        /* only hardware formats are of interest here */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))

        hwaccel = get_hwaccel(*p);
            /* skip if another hwaccel is already active, or the user asked
             * for a specific different one */
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))

        ret = hwaccel->init(s);
        /* init failed: fatal only when this hwaccel was explicitly requested */
        if (ist->hwaccel_id == hwaccel->id) {
            av_log(NULL, AV_LOG_FATAL,
                   "%s hwaccel requested for input stream #%d:%d, "
                   "but cannot be initialized.\n", hwaccel->name,
                   ist->file_index, ist->st->index);
            return AV_PIX_FMT_NONE;

        /* success: remember which hwaccel/format is now in use */
        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt   = *p;
2514 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2516 InputStream *ist = s->opaque;
2518 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2519 return ist->hwaccel_get_buffer(s, frame, flags);
2521 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream ist_index (when decoding is needed) and
 * reset its next_pts/next_dts bookkeeping.
 * On failure a human-readable message is written into error[error_len] and a
 * negative AVERROR code is returned.
 * NOTE(review): this excerpt is missing several lines (the `if (!codec)`
 * guard before the "Decoder not found" message, some closing braces, the
 * final return); tokens kept as-is.
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        /* reached when no decoder was found for the stream's codec */
        snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
        return AVERROR(EINVAL);

        /* wire up the per-stream callbacks used for hwaccel negotiation */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* default to automatic thread count unless the user set one */
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     ist->file_index, ist->st->index, av_err2str(ret));
        /* any unconsumed decoder options are a user error */
        assert_avoptions(ist->decoder_opts);

    /* timestamps are unknown until the first packet is seen */
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;
2573 static InputStream *get_input_stream(OutputStream *ost)
2575 if (ost->source_index >= 0)
2576 return input_streams[ost->source_index];
/*
 * qsort()-compatible three-way comparison of two int64_t values.
 * Returns a negative, zero or positive value as *a is less than, equal to,
 * or greater than *b (overflow-safe: no subtraction of the operands).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Open the encoder for an output stream that needs encoding: propagate the
 * decoder's subtitle header, choose default threading/bitrate options,
 * forward hw frames context from the filtergraph, open the codec, copy
 * encoder side data to the stream, and sync the stream time base.
 * On failure writes a message into error[error_len] and returns a negative
 * AVERROR code.
 * NOTE(review): this excerpt is missing lines (error `return ret` paths,
 * `if (ret < 0)` heads, closing braces, the else branch for non-encoded
 * streams); tokens kept as-is.
 */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;

        if ((ist = get_input_stream(ost)))
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;

        /* default to automatic thread count unless the user chose one */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio bitrate of 128k when none was requested */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* hand the filtergraph's hw frames context to the encoder */
        if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
            ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
        /* fixed-frame-size encoders need the buffersink to emit exact frames */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* mirror the opened encoder context into the (deprecated) st->codec */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");

        /* copy side data produced by the encoder onto the output stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
                                                  sizeof(*ost->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                AVPacketSideData *sd_dst = &ost->st->side_data[i];

                sd_dst->data = av_malloc(sd_src->size);
                    return AVERROR(ENOMEM);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;

        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
        ost->st->codec->codec= ost->enc_ctx->codec;
        /* non-encoded (stream-copy) path: apply leftover options only */
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
            av_log(NULL, AV_LOG_FATAL,
                   "Error setting up codec context options.\n");
        // copy timebase while removing common factors
        ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * Parse the -force_key_frames specification string `kf` (comma-separated
 * timestamps, or "chapters[+offset]" to key-frame at chapter starts) into a
 * sorted array of encoder-timebase pts, stored in ost->forced_kf_pts /
 * ost->forced_kf_count.
 * NOTE(review): this excerpt is missing lines (declarations of p/t/pts,
 * the comma-counting loop body, exit_program() on allocation failure, brace
 * closings); tokens kept as-is.
 */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    int n = 1, i, size, index = 0;

    /* count entries: one more than the number of ',' separators */
    for (p = kf; *p; p++)

    pts = av_malloc_array(size, sizeof(*pts));
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");

    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (!memcmp(p, "chapters", 8)) {
            /* "chapters[+offset]": one forced kf per chapter start */
            AVFormatContext *avf = output_files[ost->file_index]->ctx;

            /* grow the array by (nb_chapters - 1), guarding against overflow */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");

            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;

            /* plain timestamp entry */
            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

    av_assert0(index == size);
    /* encoder expects the forced-kf list in ascending pts order */
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts   = pts;
/*
 * Warn (once per stream) when a packet arrives for a stream that appeared
 * after avformat_find_stream_info(); such streams are not handled.
 * nb_streams_warn tracks the highest stream index already reported.
 * NOTE(review): the early `return;` for already-warned streams is elided in
 * this excerpt; tokens kept as-is.
 */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* already warned about this stream index (or a higher one) */
    if (pkt->stream_index < file->nb_streams_warn)
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(st->codec->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    /* remember the new high-water mark so we warn only once */
    file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name — unless the user already set
 * one, or bitexact mode is requested (then only "Lavc <name>" without the
 * version, to keep output reproducible).
 * NOTE(review): this excerpt is missing lines (early `return`s, the
 * `if (e)` heads before av_opt_eval_flags, the bitexact `else`); tokens
 * kept as-is.
 */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = 0;

    /* user-provided encoder tag wins */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))

    /* evaluate muxer-level fflags to detect bitexact mode */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    /* evaluate codec-level flags for the same reason */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);

    /* room for "Lavc<version> " + encoder name + NUL */
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    /* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string */
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to source streams, initialize framerate emulation, compute
 * stream-copy / encoder parameters for every output stream, open all
 * decoders and encoders, discard unused programs, write output file headers
 * and print the stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this excerpt has lost many lines (continue/goto statements,
 * closing braces, `#endif`s, error paths, the dump_attachment/else heads);
 * the surviving tokens are kept unchanged, only comments were added.
 */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    char error[1024] = {0};

    /* give complex-filtergraph outputs a source stream when the graph has
     * exactly one input, so -map-less behavior stays sane */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
            if (fg->nb_inputs != 1)
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
            ofilter->ost->source_index = k;

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();

    /* for each output stream, we compute the right encoding parameters */
    for (i = 0; i < nb_output_streams; i++) {
        AVCodecContext *enc_ctx;
        AVCodecContext *dec_ctx = NULL;
        ost = output_streams[i];
        oc  = output_files[ost->file_index]->ctx;
        ist = get_input_stream(ost);

        if (ost->attachment_filename)

        /* stream copy writes into the muxer's st->codec directly */
        enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;

            dec_ctx = ist->dec_ctx;

            ost->st->disposition          = ist->st->disposition;
            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;

            /* mark the first audio/video stream of its type as default when
             * no other stream of that type exists in the output */
            for (j=0; j<oc->nb_streams; j++) {
                AVStream *st = oc->streams[j];
                if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
            if (j == oc->nb_streams)
                if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                    ost->st->disposition = AV_DISPOSITION_DEFAULT;

        if (ost->stream_copy) {
            uint64_t extra_size;

            av_assert0(ist && !ost->filter);

            extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;

            if (extra_size > INT_MAX) {
                return AVERROR(EINVAL);

            /* if stream_copy is selected, no need to decode or encode */
            enc_ctx->codec_id   = dec_ctx->codec_id;
            enc_ctx->codec_type = dec_ctx->codec_type;

            if (!enc_ctx->codec_tag) {
                unsigned int codec_tag;
                /* keep the input tag unless the muxer maps it elsewhere */
                if (!oc->oformat->codec_tag ||
                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
                     !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
                    enc_ctx->codec_tag = dec_ctx->codec_tag;

            enc_ctx->bit_rate       = dec_ctx->bit_rate;
            enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
            enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
            enc_ctx->field_order    = dec_ctx->field_order;
            if (dec_ctx->extradata_size) {
                enc_ctx->extradata = av_mallocz(extra_size);
                if (!enc_ctx->extradata) {
                    return AVERROR(ENOMEM);
                memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
            enc_ctx->extradata_size= dec_ctx->extradata_size;
            enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;

            enc_ctx->time_base = ist->st->time_base;
            /*
             * Avi is a special case here because it supports variable fps but
             * having the fps and timebase differe significantly adds quite some
             */
            if(!strcmp(oc->oformat->name, "avi")) {
                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
                    enc_ctx->time_base.num = ist->st->r_frame_rate.den;
                    enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
                    enc_ctx->ticks_per_frame = 2;
                } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
                                 && av_q2d(ist->st->time_base) < 1.0/500
                    enc_ctx->time_base = dec_ctx->time_base;
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
                    enc_ctx->time_base.den *= 2;
                    enc_ctx->ticks_per_frame = 2;
            /* non-variable-fps, non-mov-family muxers: prefer the codec
             * time base when it is finer than the stream's */
            } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
                      && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
                      && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
                      && strcmp(oc->oformat->name, "f4v")
                if( copy_tb<0 && dec_ctx->time_base.den
                               && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
                               && av_q2d(ist->st->time_base) < 1.0/500
                    enc_ctx->time_base = dec_ctx->time_base;
                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
            /* timecode ("tmcd") tracks keep a plausible codec time base */
            if (   enc_ctx->codec_tag == AV_RL32("tmcd")
                && dec_ctx->time_base.num < dec_ctx->time_base.den
                && dec_ctx->time_base.num > 0
                && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
                enc_ctx->time_base = dec_ctx->time_base;

            if (!ost->frame_rate.num)
                ost->frame_rate = ist->framerate;
            if(ost->frame_rate.num)
                enc_ctx->time_base = av_inv_q(ost->frame_rate);

            av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
                        enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);

            /* copy stream-level side data, skipping the display matrix when
             * rotation was overridden on the command line */
            if (ist->st->nb_side_data) {
                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                                      sizeof(*ist->st->side_data));
                if (!ost->st->side_data)
                    return AVERROR(ENOMEM);

                ost->st->nb_side_data = 0;
                for (j = 0; j < ist->st->nb_side_data; j++) {
                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
                    AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];

                    if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)

                    sd_dst->data = av_malloc(sd_src->size);
                        return AVERROR(ENOMEM);
                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
                    sd_dst->size = sd_src->size;
                    sd_dst->type = sd_src->type;
                    ost->st->nb_side_data++;

            ost->parser = av_parser_init(enc_ctx->codec_id);

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (audio_volume != 256) {
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                enc_ctx->channel_layout     = dec_ctx->channel_layout;
                enc_ctx->sample_rate        = dec_ctx->sample_rate;
                enc_ctx->channels           = dec_ctx->channels;
                enc_ctx->frame_size         = dec_ctx->frame_size;
                enc_ctx->audio_service_type = dec_ctx->audio_service_type;
                enc_ctx->block_align        = dec_ctx->block_align;
                enc_ctx->initial_padding    = dec_ctx->delay;
                enc_ctx->profile            = dec_ctx->profile;
#if FF_API_AUDIOENC_DELAY
                enc_ctx->delay              = dec_ctx->delay;
                /* block_align is meaningless for MP3/AC3 in most containers */
                if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
                    enc_ctx->block_align= 0;
                if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
                    enc_ctx->block_align= 0;
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
                enc_ctx->width              = dec_ctx->width;
                enc_ctx->height             = dec_ctx->height;
                enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
                if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
                        av_mul_q(ost->frame_aspect_ratio,
                                 (AVRational){ enc_ctx->height, enc_ctx->width });
                    av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                           "with stream copy may produce invalid files\n");
                else if (ist->st->sample_aspect_ratio.num)
                    sar = ist->st->sample_aspect_ratio;
                    sar = dec_ctx->sample_aspect_ratio;
                ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
                ost->st->avg_frame_rate = ist->st->avg_frame_rate;
                ost->st->r_frame_rate = ist->st->r_frame_rate;
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->width  = dec_ctx->width;
                enc_ctx->height = dec_ctx->height;
            case AVMEDIA_TYPE_UNKNOWN:
            case AVMEDIA_TYPE_DATA:
            case AVMEDIA_TYPE_ATTACHMENT:

            /* encoding path */
            ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
                /* should only happen when a default codec is not present. */
                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
                ret = AVERROR(EINVAL);

            set_encoder_id(output_files[ost->file_index], ost);

                if (qsv_transcode_init(ost))

                /* build a trivial filtergraph when none was requested */
                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
                    fg = init_simple_filtergraph(ist, ost);
                    if (configure_filtergraph(fg)) {
                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");

            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                /* frame rate fallback chain: filter -> -r -> r_frame_rate -> 25 */
                if (!ost->frame_rate.num)
                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
                if (ist && !ost->frame_rate.num)
                    ost->frame_rate = ist->framerate;
                if (ist && !ost->frame_rate.num)
                    ost->frame_rate = ist->st->r_frame_rate;
                if (ist && !ost->frame_rate.num) {
                    ost->frame_rate = (AVRational){25, 1};
                    av_log(NULL, AV_LOG_WARNING,
                           "about the input framerate is available. Falling "
                           "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                           "if you want a different framerate.\n",
                           ost->file_index, ost->index);
//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                    ost->frame_rate = ost->enc->supported_framerates[idx];
                // reduce frame rate for mpeg4 to be within the spec limits
                if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
                    av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                              ost->frame_rate.num, ost->frame_rate.den, 65535);

            switch (enc_ctx->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio parameters come from the negotiated filter link */
                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
                enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
            case AVMEDIA_TYPE_VIDEO:
                enc_ctx->time_base = av_inv_q(ost->frame_rate);
                if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
                    enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
                if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
                   && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
                /* forced keyframe times were parsed in AV_TIME_BASE; rescale
                 * them to the now-known encoder time base */
                for (j = 0; j < ost->forced_kf_count; j++)
                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                         enc_ctx->time_base);

                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
                enc_ctx->height = ost->filter->filter->inputs[0]->h;
                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
                    ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
                    av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
                if (!strncmp(ost->enc->name, "libx264", 7) &&
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                    av_log(NULL, AV_LOG_WARNING,
                           "No pixel format specified, %s for H.264 encoding chosen.\n"
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
                if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
                    av_log(NULL, AV_LOG_WARNING,
                           "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;

                ost->st->avg_frame_rate = ost->frame_rate;

                /* keep the raw-sample precision only when nothing changed */
                    enc_ctx->width   != dec_ctx->width  ||
                    enc_ctx->height  != dec_ctx->height ||
                    enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
                    enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;

                if (ost->forced_keyframes) {
                    if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                        ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                            forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                            av_log(NULL, AV_LOG_ERROR,
                                   "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                        ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

                    // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                    // parse it only for static kf timings
                    } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                        parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            case AVMEDIA_TYPE_SUBTITLE:
                enc_ctx->time_base = (AVRational){1, 1000};
                if (!enc_ctx->width) {
                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
            case AVMEDIA_TYPE_DATA:

        /* apply the user-supplied -disposition flags string */
        if (ost->disposition) {
            static const AVOption opts[] = {
                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            static const AVClass class = {
                .item_name  = av_default_item_name,
                .version    = LIBAVUTIL_VERSION_INT,
            const AVClass *pclass = &class;

            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* roll back encoders opened so far */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ret = init_output_stream(output_streams[i], error, sizeof(error));

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
            p->discard = discard;

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        oc->interrupt_callback = int_cb;
        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
            snprintf(error, sizeof(error),
                     "Could not write header for output file #%d "
                     "(incorrect codec parameters ?): %s",
                     i, av_err2str(ret));
            ret = AVERROR(EINVAL);
//         assert_avoptions(output_files[i]->opts);
        if (strcmp(oc->oformat->name, "rtp")) {

    /* dump the file output parameters - cannot be done before in case */
    for (i = 0; i < nb_output_files; i++) {
        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);

    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (ist->filters[j]->graph->graph_desc) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);

        if (ost->filter && ost->filter->graph->graph_desc) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,

        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
            /* transcoding: report decoder/encoder and their codec names */
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";

                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        av_log(NULL, AV_LOG_INFO, "\n");

        /* error label: report the message accumulated above */
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);

    if (sdp_filename || want_sdp) {

    transcode_init_done = 1;
3364 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3365 static int need_output(void)
3369 for (i = 0; i < nb_output_streams; i++) {
3370 OutputStream *ost = output_streams[i];
3371 OutputFile *of = output_files[ost->file_index];
3372 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are already finished, or whose output file has hit
 * the -fs size limit (avio_tell() reports bytes written so far). */
3374 if (ost->finished ||
3375 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached for this stream: close every stream of the
 * owning output file so the whole file can be finalized. */
3377 if (ost->frame_number >= ost->max_frames) {
3379 for (j = 0; j < of->ctx->nb_streams; j++)
3380 close_output_stream(output_streams[of->ost_index + j]);
3391 * Select the output stream to process.
3393 * @return selected output stream, or NULL if none available
3395 static OutputStream *choose_output(void)
3398 int64_t opts_min = INT64_MAX;
3399 OutputStream *ost_min = NULL;
3401 for (i = 0; i < nb_output_streams; i++) {
3402 OutputStream *ost = output_streams[i];
/* Current mux position of this stream, rescaled out of the stream time
 * base (the rescale target is on an elided line — presumably a common
 * base such as AV_TIME_BASE_Q; confirm against the full source).
 * AV_NOPTS_VALUE maps to INT64_MIN so streams that have not emitted
 * anything yet are selected first. */
3403 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3404 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3406 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3407 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Track the unfinished stream with the smallest position; a stream
 * currently marked "unavailable" (EAGAIN) yields NULL so the caller
 * can retry later instead of busy-processing it. */
3409 if (!ost->finished && opts < opts_min) {
3411 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin (fd 0) via termios,
 * used while reading interactive commands so typed text is shown/hidden.
 * tcgetattr()/tcsetattr() failures are deliberately ignored (best effort;
 * stdin may not be a terminal). */
3417 static void set_tty_echo(int on)
3421 if (tcgetattr(0, &tty) == 0) {
3422 if (on) tty.c_lflag |= ECHO;
3423 else tty.c_lflag &= ~ECHO;
3424 tcsetattr(0, TCSANOW, &tty);
/* Service interactive keyboard input from the main transcode loop.
 * cur_time is the current wall-clock time (av_gettime_relative units);
 * keys are polled at most once per 100 ms and never in daemon mode.
 * Returns AVERROR_EXIT when a termination signal was received (or quit
 * was requested — that branch is on elided lines); otherwise 0
 * (return statement elided in this view). */
3429 static int check_keyboard_interaction(int64_t cur_time)
3432 static int64_t last_time;
3433 if (received_nb_signals)
3434 return AVERROR_EXIT;
3435 /* read_key() returns 0 on EOF */
3436 if(cur_time - last_time >= 100000 && !run_as_daemon){
3438 last_time = cur_time;
3442 return AVERROR_EXIT;
/* '+'/'-': raise/lower the libav* log level; 's': toggle QP histogram. */
3443 if (key == '+') av_log_set_level(av_log_get_level()+10);
3444 if (key == '-') av_log_set_level(av_log_get_level()-10);
3445 if (key == 's') qp_hist ^= 1;
/* 'h' cycles packet dumping through its states (guard lines elided):
 * off -> packet dump -> packet+payload hex dump at DEBUG level. */
3448 do_hex_dump = do_pkt_dump = 0;
3449 } else if(do_pkt_dump){
3453 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': prompt (with echo enabled) for a filter command of the form
 * "<target>|all <time>|-1 <command> [<argument>]" and deliver it to the
 * filtergraphs — 'c' sends immediately to the first matching filter
 * (AVFILTER_CMD_FLAG_ONE), 'C' queues it for all matching filters. */
3455 if (key == 'c' || key == 'C'){
3456 char buf[4096], target[64], command[256], arg[256] = {0};
3459 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3462 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3467 fprintf(stderr, "\n");
3469 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3470 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3471 target, time, command, arg);
3472 for (i = 0; i < nb_filtergraphs; i++) {
3473 FilterGraph *fg = filtergraphs[i];
3476 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3477 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3478 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3479 } else if (key == 'c') {
3480 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3481 ret = AVERROR_PATCHWELCOME;
3483 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3485 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3490 av_log(NULL, AV_LOG_ERROR,
3491 "Parse error, at least 3 arguments were expected, "
3492 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles to the next codec debug mode (skipping the visualization
 * flags noted below); 'd' prompts for a numeric debug bitmask. The new
 * value is applied to every input decoder and output encoder context.
 * NOTE(review): st->codec is the deprecated per-stream codec context. */
3495 if (key == 'd' || key == 'D'){
3498 debug = input_streams[0]->st->codec->debug<<1;
3499 if(!debug) debug = 1;
3500 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3507 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3512 fprintf(stderr, "\n");
3513 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3514 fprintf(stderr,"error parsing debug value\n");
3516 for(i=0;i<nb_input_streams;i++) {
3517 input_streams[i]->st->codec->debug = debug;
3519 for(i=0;i<nb_output_streams;i++) {
3520 OutputStream *ost = output_streams[i];
3521 ost->enc_ctx->debug = debug;
3523 if(debug) av_log_set_level(AV_LOG_DEBUG);
3524 fprintf(stderr,"debug=%d\n", debug);
/* Help text — presumably printed for the '?' key (guard line elided). */
3527 fprintf(stderr, "key function\n"
3528 "? show this help\n"
3529 "+ increase verbosity\n"
3530 "- decrease verbosity\n"
3531 "c Send command to first matching filter supporting it\n"
3532 "C Send/Que command to all matching filters\n"
3533 "D cycle through available debug modes\n"
3534 "h dump packets/hex press to cycle through the 3 states\n"
3536 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is the InputFile, assigned on an
 * elided line).  Reads packets with av_read_frame() and forwards them to
 * the main thread through f->in_thread_queue.  Read errors/EOF are
 * propagated to the receiver via the queue's error state. */
3543 static void *input_thread(void *arg)
3546 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3551 ret = av_read_frame(f->ctx, &pkt);
3553 if (ret == AVERROR(EAGAIN)) {
3558 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* First send attempt; for non-blocking queues a full queue (EAGAIN)
 * triggers one blocking retry plus a hint to raise -thread_queue_size. */
3561 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3562 if (flags && ret == AVERROR(EAGAIN)) {
3564 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3565 av_log(f->ctx, AV_LOG_WARNING,
3566 "Thread message queue blocking; consider raising the "
3567 "thread_queue_size option (current value: %d)\n",
3568 f->thread_queue_size);
/* Send failed for good: drop the packet and surface the error (EOF is
 * expected termination and not logged as an error). */
3571 if (ret != AVERROR_EOF)
3572 av_log(f->ctx, AV_LOG_ERROR,
3573 "Unable to send packet to main thread: %s\n",
3575 av_packet_unref(&pkt);
3576 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all demuxer threads: signal EOF to each input thread's send
 * side, drain and unref any packets still queued, join the thread and
 * free its message queue.  Safe to call when queues were never created. */
3584 static void free_input_threads(void)
3588 for (i = 0; i < nb_input_files; i++) {
3589 InputFile *f = input_files[i];
3592 if (!f || !f->in_thread_queue)
/* Make the sender's next send fail with EOF so the thread exits its
 * read loop, then empty the queue so the join cannot block. */
3594 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3595 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3596 av_packet_unref(&pkt);
3598 pthread_join(f->thread, NULL);
3600 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread (input_thread) per input file, each with an
 * AVPacket message queue of f->thread_queue_size entries.  Skipped when
 * there is only a single input (the main thread reads directly then).
 * Returns 0 on success or a negative AVERROR code. */
3604 static int init_input_threads(void)
3608 if (nb_input_files == 1)
3611 for (i = 0; i < nb_input_files; i++) {
3612 InputFile *f = input_files[i];
/* Use non-blocking queue sends for inputs that cannot be paused by
 * back-pressure: non-seekable I/O, or any demuxer except lavfi. */
3614 if (f->ctx->pb ? !f->ctx->pb->seekable :
3615 strcmp(f->ctx->iformat->name, "lavfi"))
3616 f->non_blocking = 1;
3617 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3618 f->thread_queue_size, sizeof(AVPacket));
/* pthread_create returns an errno value directly (not via errno). */
3622 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3623 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3624 av_thread_message_queue_free(&f->in_thread_queue);
3625 return AVERROR(ret);
/* Receive one demuxed packet from this input file's thread queue.
 * The flag selection condition is on an elided line — presumably it
 * chooses non-blocking receive when f->non_blocking is set. */
3631 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3633 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3635 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file f: from the demuxer thread queue
 * when multiple inputs exist, otherwise directly via av_read_frame(). */
3639 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Rate emulation (-re) throttle: compare each stream's DTS, rescaled to
 * microseconds, against wall-clock time elapsed since the stream start
 * and report EAGAIN when reading would run ahead of realtime.
 * NOTE(review): the enclosing rate-emulation guard and the comparison
 * itself are on elided lines — confirm against the full source. */
3643 for (i = 0; i < f->nb_streams; i++) {
3644 InputStream *ist = input_streams[f->ist_index + i];
3645 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3646 int64_t now = av_gettime_relative() - ist->start;
3648 return AVERROR(EAGAIN);
3653 if (nb_input_files > 1)
3654 return get_input_packet_mt(f, pkt);
3656 return av_read_frame(f->ctx, pkt);
/* Check whether any output stream is currently marked unavailable, i.e.
 * whether the last round of processing hit EAGAIN somewhere (return
 * statements are on elided lines). */
3659 static int got_eagain(void)
3662 for (i = 0; i < nb_output_streams; i++)
3663 if (output_streams[i]->unavailable)
/* Clear the per-file eagain flags and per-stream unavailable flags so the
 * next scheduling pass re-considers every input and output. */
3668 static void reset_eagain(void)
3671 for (i = 0; i < nb_input_files; i++)
3672 input_files[i]->eagain = 0;
3673 for (i = 0; i < nb_output_streams; i++)
3674 output_streams[i]->unavailable = 0;
3677 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) against *duration (in time_base) with
 * av_compare_ts(); when tmp is larger, *duration is replaced (assignment
 * on an elided line) and tmp_time_base becomes the new time base. */
3678 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3679 AVRational time_base)
3685 return tmp_time_base;
3688 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3691 return tmp_time_base;
/* Seek input file 'is' back to its start time (used by -stream_loop).
 * After seeking, decoders are flushed and the file's total duration is
 * accumulated into ifile->duration so subsequent loops can offset their
 * timestamps.  Returns the av_seek_frame() result on failure. */
3697 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3700 AVCodecContext *avctx;
3701 int i, ret, has_audio = 0;
3702 int64_t duration = 0;
3704 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush each active decoder and detect whether the file has
 * a decodable audio stream that produced samples. */
3708 for (i = 0; i < ifile->nb_streams; i++) {
3709 ist = input_streams[ifile->ist_index + i];
3710 avctx = ist->dec_ctx;
3713 if (ist->decoding_needed) {
3714 process_input_packet(ist, NULL, 1);
3715 avcodec_flush_buffers(avctx);
3718 /* duration is the length of the last frame in a stream
3719 * when audio stream is present we don't care about
3720 * last video frame length because it's not defined exactly */
3721 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate each stream's last-frame duration — exact sample
 * count for audio, 1/framerate for video (falling back to the average
 * frame rate, then to a single time-base tick). */
3725 for (i = 0; i < ifile->nb_streams; i++) {
3726 ist = input_streams[ifile->ist_index + i];
3727 avctx = ist->dec_ctx;
3730 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3731 AVRational sample_rate = {1, avctx->sample_rate};
3733 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3737 if (ist->framerate.num) {
3738 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3739 } else if (ist->st->avg_frame_rate.num) {
3740 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3741 } else duration = 1;
3743 if (!ifile->duration)
3744 ifile->time_base = ist->st->time_base;
3745 /* the total duration of the stream, max_pts - min_pts is
3746 * the duration of the stream without the last frame */
3747 duration += ist->max_pts - ist->min_pts;
3748 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented per iteration (on an elided line);
 * loop < 0 presumably means loop forever. */
3752 if (ifile->loop > 0)
3760 * - 0 -- one packet was read and processed
3761 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3762 * this function should be called again
3763 * - AVERROR_EOF -- this function should not be called again
3765 static int process_input(int file_index)
3767 InputFile *ifile = input_files[file_index];
3768 AVFormatContext *is;
3776 ret = get_input_packet(ifile, &pkt);
3778 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure/EOF, seek the file back to its start
 * and retry the read once. */
3782 if (ret < 0 && ifile->loop) {
3783 if ((ret = seek_to_start(ifile, is)) < 0)
3785 ret = get_input_packet(ifile, &pkt);
3788 if (ret != AVERROR_EOF) {
3789 print_error(is->filename, ret);
/* Definitive EOF: flush every decoder of this file and mark outputs that
 * bypass lavfi (stream copy, subtitles) as finished. */
3794 for (i = 0; i < ifile->nb_streams; i++) {
3795 ist = input_streams[ifile->ist_index + i];
3796 if (ist->decoding_needed) {
3797 ret = process_input_packet(ist, NULL, 0);
3802 /* mark all outputs that don't go through lavfi as finished */
3803 for (j = 0; j < nb_output_streams; j++) {
3804 OutputStream *ost = output_streams[j];
3806 if (ost->source_index == ifile->ist_index + i &&
3807 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3808 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the main loop moves on to other files. */
3812 ifile->eof_reached = 1;
3813 return AVERROR(EAGAIN);
3819 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3820 is->streams[pkt.stream_index]);
3822 /* the following test is needed in case new streams appear
3823 dynamically in stream : we ignore them */
3824 if (pkt.stream_index >= ifile->nb_streams) {
3825 report_new_stream(file_index, &pkt);
3826 goto discard_packet;
3829 ist = input_streams[ifile->ist_index + pkt.stream_index];
3831 ist->data_size += pkt.size;
3835 goto discard_packet;
3837 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3838 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3843 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3844 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3845 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3846 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3847 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3848 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3849 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3850 av_ts2str(input_files[ist->file_index]->ts_offset),
3851 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction: only for streams whose pts wrap in fewer
 * than 64 bits.  Packets with timestamps more than half a wrap period
 * past the start time are assumed to be pre-wrap and shifted down. */
3854 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3855 int64_t stime, stime2;
3856 // Correcting starttime based on the enabled streams
3857 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3858 // so we instead do it here as part of discontinuity handling
3859 if ( ist->next_dts == AV_NOPTS_VALUE
3860 && ifile->ts_offset == -is->start_time
3861 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3862 int64_t new_start_time = INT64_MAX;
3863 for (i=0; i<is->nb_streams; i++) {
3864 AVStream *st = is->streams[i];
3865 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3867 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3869 if (new_start_time > is->start_time) {
3870 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3871 ifile->ts_offset = -new_start_time;
3875 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3876 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3877 ist->wrap_correction_done = 1;
3879 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3880 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3881 ist->wrap_correction_done = 0;
3883 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3884 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3885 ist->wrap_correction_done = 0;
3889 /* add the stream-global side data to the first packet */
3890 if (ist->nb_packets == 1) {
3891 if (ist->st->nb_side_data)
3892 av_packet_split_side_data(&pkt);
3893 for (i = 0; i < ist->st->nb_side_data; i++) {
3894 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't overwrite side data the packet already carries, and skip the
 * display matrix when autorotation will consume it instead. */
3897 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3899 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3902 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3906 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's accumulated ts_offset, then the per-stream
 * -itsscale factor. */
3910 if (pkt.dts != AV_NOPTS_VALUE)
3911 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3912 if (pkt.pts != AV_NOPTS_VALUE)
3913 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3915 if (pkt.pts != AV_NOPTS_VALUE)
3916 pkt.pts *= ist->ts_scale;
3917 if (pkt.dts != AV_NOPTS_VALUE)
3918 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check (AVFMT_TS_DISCONT formats only, and
 * only before this stream has an expected next_dts): a jump beyond
 * dts_delta_threshold relative to the file's last seen timestamp moves
 * ts_offset and rewrites this packet's timestamps accordingly. */
3920 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3921 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3922 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3923 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3924 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3925 int64_t delta = pkt_dts - ifile->last_ts;
3926 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3927 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3928 ifile->ts_offset -= delta;
3929 av_log(NULL, AV_LOG_DEBUG,
3930 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3931 delta, ifile->ts_offset);
3932 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3933 if (pkt.pts != AV_NOPTS_VALUE)
3934 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the file duration accumulated over
 * previous iterations and keep min/max pts up to date for the next one. */
3938 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3939 if (pkt.pts != AV_NOPTS_VALUE) {
3940 pkt.pts += duration;
3941 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3942 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3945 if (pkt.dts != AV_NOPTS_VALUE)
3946 pkt.dts += duration;
/* Per-stream discontinuity handling against the stream's expected
 * next_dts: for TS_DISCONT formats the offset is adjusted; otherwise
 * timestamps beyond dts_error_threshold are dropped (set to NOPTS). */
3948 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3949 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3950 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3951 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3953 int64_t delta = pkt_dts - ist->next_dts;
3954 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3955 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3956 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3957 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3958 ifile->ts_offset -= delta;
3959 av_log(NULL, AV_LOG_DEBUG,
3960 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3961 delta, ifile->ts_offset);
3962 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3963 if (pkt.pts != AV_NOPTS_VALUE)
3964 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3967 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3968 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3969 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3970 pkt.dts = AV_NOPTS_VALUE;
3972 if (pkt.pts != AV_NOPTS_VALUE){
3973 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3974 delta = pkt_pts - ist->next_dts;
3975 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3976 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3977 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3978 pkt.pts = AV_NOPTS_VALUE;
/* Remember this file's last DTS (in AV_TIME_BASE units) for the
 * inter-stream discontinuity check above. */
3984 if (pkt.dts != AV_NOPTS_VALUE)
3985 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3988 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3989 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3990 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3991 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3992 av_ts2str(input_files[ist->file_index]->ts_offset),
3993 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3996 sub2video_heartbeat(ist, pkt.pts);
3998 process_input_packet(ist, &pkt, 0);
4001 av_packet_unref(&pkt);
4007 * Perform a step of transcoding for the specified filter graph.
4009 * @param[in] graph filter graph to consider
4010 * @param[out] best_ist input stream where a frame would allow to continue
4011 * @return 0 for success, <0 for error
4013 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4016 int nb_requests, nb_requests_max = 0;
4017 InputFilter *ifilter;
/* Ask the graph for output; success means frames may be ready to reap,
 * EOF means the graph is drained and its output streams can be closed. */
4021 ret = avfilter_graph_request_oldest(graph->graph);
4023 return reap_filters(0);
4025 if (ret == AVERROR_EOF) {
4026 ret = reap_filters(1);
4027 for (i = 0; i < graph->nb_outputs; i++)
4028 close_output_stream(graph->outputs[i]->ost);
4031 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved.  Pick the input stream whose buffersrc
 * reported the most failed requests (skipping files that are stalled or
 * at EOF) — feeding it is most likely to unblock the graph. */
4034 for (i = 0; i < graph->nb_inputs; i++) {
4035 ifilter = graph->inputs[i];
4037 if (input_files[ist->file_index]->eagain ||
4038 input_files[ist->file_index]->eof_reached)
4040 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4041 if (nb_requests > nb_requests_max) {
4042 nb_requests_max = nb_requests;
/* No feedable input found (elided guard): mark the graph's outputs
 * unavailable so the scheduler tries other streams. */
4048 for (i = 0; i < graph->nb_outputs; i++)
4049 graph->outputs[i]->ost->unavailable = 1;
4055 * Run a single step of transcoding.
4057 * @return 0 for success, <0 for error
4059 static int transcode_step(void)
/* Pick the output stream with the lowest mux position; no stream means
 * either everything stalled on EAGAIN (reset and retry) or all inputs
 * are exhausted. */
4065 ost = choose_output();
4072 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed output: let the filtergraph decide which input stream to
 * feed next; otherwise feed from the stream's direct source. */
4077 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4082 av_assert0(ost->source_index >= 0);
4083 ist = input_streams[ost->source_index];
4086 ret = process_input(ist->file_index);
4087 if (ret == AVERROR(EAGAIN)) {
4088 if (input_files[ist->file_index]->eagain)
4089 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4094 return ret == AVERROR_EOF ? 0 : ret;
4096 return reap_filters(0);
4100 * The following code is the main loop of the file converter
4102 static int transcode(void)
4105 AVFormatContext *os;
4108 int64_t timer_start;
4109 int64_t total_packets_written = 0;
4111 ret = transcode_init();
4115 if (stdin_interaction) {
4116 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4119 timer_start = av_gettime_relative();
4122 if ((ret = init_input_threads()) < 0)
/* Main loop: handle keyboard input, stop when no output is needed, run
 * one transcode step, and print the periodic status report. */
4126 while (!received_sigterm) {
4127 int64_t cur_time= av_gettime_relative();
4129 /* if 'q' pressed, exits */
4130 if (stdin_interaction)
4131 if (check_keyboard_interaction(cur_time) < 0)
4134 /* check if there's any stream where output is still needed */
4135 if (!need_output()) {
4136 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4140 ret = transcode_step();
4141 if (ret < 0 && ret != AVERROR_EOF) {
4143 av_strerror(ret, errbuf, sizeof(errbuf));
4145 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4149 /* dump report by using the output first video and audio streams */
4150 print_report(0, timer_start, cur_time);
4153 free_input_threads();
4156 /* at the end of stream, we must flush the decoder buffers */
4157 for (i = 0; i < nb_input_streams; i++) {
4158 ist = input_streams[i];
4159 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4160 process_input_packet(ist, NULL, 0);
4167 /* write the trailer if needed and close file */
4168 for (i = 0; i < nb_output_files; i++) {
4169 os = output_files[i]->ctx;
4170 if ((ret = av_write_trailer(os)) < 0) {
4171 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4177 /* dump report by using the first video and audio streams */
4178 print_report(1, timer_start, av_gettime_relative());
4180 /* close each encoder */
4181 for (i = 0; i < nb_output_streams; i++) {
4182 ost = output_streams[i];
4183 if (ost->encoding_needed) {
4184 av_freep(&ost->enc_ctx->stats_in);
4186 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail the run if nothing was ever written. */
4189 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4190 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4194 /* close each decoder */
4195 for (i = 0; i < nb_input_streams; i++) {
4196 ist = input_streams[i];
4197 if (ist->decoding_needed) {
4198 avcodec_close(ist->dec_ctx);
4199 if (ist->hwaccel_uninit)
4200 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (reached via elided label): stop input threads and free
 * all per-output-stream allocations. */
4209 free_input_threads();
4212 if (output_streams) {
4213 for (i = 0; i < nb_output_streams; i++) {
4214 ost = output_streams[i];
4217 if (fclose(ost->logfile))
4218 av_log(NULL, AV_LOG_ERROR,
4219 "Error closing logfile, loss of information possible: %s\n",
4220 av_err2str(AVERROR(errno)));
4221 ost->logfile = NULL;
4223 av_freep(&ost->forced_kf_pts);
4224 av_freep(&ost->apad);
4225 av_freep(&ost->disposition);
4226 av_dict_free(&ost->encoder_opts);
4227 av_dict_free(&ost->sws_dict);
4228 av_dict_free(&ost->swr_opts);
4229 av_dict_free(&ost->resample_opts);
/* Return the process's user CPU time in microseconds: getrusage() on
 * POSIX, GetProcessTimes() on Windows (FILETIME is in 100 ns units,
 * hence the /10), falling back to wall-clock time elsewhere. */
4237 static int64_t getutime(void)
4240 struct rusage rusage;
4242 getrusage(RUSAGE_SELF, &rusage);
4243 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4244 #elif HAVE_GETPROCESSTIMES
4246 FILETIME c, e, k, u;
4247 proc = GetCurrentProcess();
4248 GetProcessTimes(proc, &c, &e, &k, &u);
4249 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4251 return av_gettime_relative();
/* Return the peak memory usage of the process in bytes: ru_maxrss (in
 * kilobytes, hence the *1024) on POSIX, PeakPagefileUsage on Windows;
 * the fallback return for other platforms is on an elided line. */
4255 static int64_t getmaxrss(void)
4257 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4258 struct rusage rusage;
4259 getrusage(RUSAGE_SELF, &rusage);
4260 return (int64_t)rusage.ru_maxrss * 1024;
4261 #elif HAVE_GETPROCESSMEMORYINFO
4263 PROCESS_MEMORY_COUNTERS memcounters;
4264 proc = GetCurrentProcess();
4265 memcounters.cb = sizeof(memcounters);
4266 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4267 return memcounters.PeakPagefileUsage;
/* No-op log callback installed in daemon mode ("-d", see main) to silence
 * all libav* logging. */
4273 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging, register all components, parse the
 * command line into input/output files, run transcode(), and report
 * benchmark/decoding statistics before exiting. */
4277 int main(int argc, char **argv)
4282 register_exit(ffmpeg_cleanup);
4284 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4286 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4287 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: run as a daemon with logging silenced. */
4289 if(argc>1 && !strcmp(argv[1], "-d")){
4291 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter and (network) protocol. */
4296 avcodec_register_all();
4298 avdevice_register_all();
4300 avfilter_register_all();
4302 avformat_network_init();
4304 show_banner(argc, argv, options);
4308 /* parse options and open all input/output files */
4309 ret = ffmpeg_parse_options(argc, argv);
4313 if (nb_output_files <= 0 && nb_input_files == 0) {
4315 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4319 /* file converter / grab */
4320 if (nb_output_files <= 0) {
4321 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4325 // if (nb_input_files == 0) {
4326 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode in user CPU time for -benchmark output. */
4330 current_time = ti = getutime();
4331 if (transcode() < 0)
4333 ti = getutime() - ti;
4335 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4337 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4338 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail when the decode-error ratio exceeds the limit. */
4339 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 signals interruption by a termination signal. */
4342 exit_program(received_nb_signals ? 255 : main_return_code);
4343 return main_return_code;