2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils for banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); opened lazily in do_video_stats(). */
static FILE *vstats_file;

/* Names of the constants usable in -force_key_frames expressions.
   NOTE(review): the initializer list continues on lines not shown in this view. */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon  = 0;  /* nonzero when interactive terminal handling is disabled */
static int nb_frames_dup = 0;   /* frames duplicated by the video sync code */
static int nb_frames_drop = 0;  /* frames dropped by the video sync code */
static int64_t decode_error_stat[2]; /* decode outcome counters; presumably [ok, errored] — confirm against usage elsewhere */

static int current_time;        /* reference time for update_benchmark() deltas */
AVIOContext *progress_avio = NULL; /* destination for -progress reports */

static uint8_t *subtitle_out;   /* scratch buffer for encoded subtitles (do_subtitle_out) */

/* Global stream/file tables shared with the option and filter code. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* saved terminal attributes, restored at exit */
static int restore_tty;         /* set once oldtty holds a valid saved state */

static void free_input_threads(void);
/*
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */

/**
 * Prepare a blank RGB32 canvas in ist->sub2video.frame, sized from the
 * decoder dimensions (falling back to the pre-configured sub2video size).
 * NOTE(review): some original lines of this body (braces, return paths) are
 * elided in this view of the file.
 */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    /* drop any previous buffer before (re)allocating */
    av_frame_unref(frame);
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* clear the first plane: fully transparent canvas */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/**
 * Blit one paletted bitmap subtitle rectangle onto the RGB32 sub2video
 * canvas, expanding palette indices through the rectangle's palette.
 * Rectangles that are not bitmaps or that fall outside the w x h canvas are
 * rejected with a warning.
 * NOTE(review): some original lines (early returns, closing braces) are
 * elided in this view.
 */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h

    /* advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel */
    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];
    pal = (uint32_t *)r->pict.data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];  /* palette lookup per pixel */
        src += r->pict.linesize[0];
/**
 * Push the current sub2video canvas into every filter fed by this input
 * stream, stamped with the given pts, and remember that pts as last_pts.
 * KEEP_REF keeps our copy of the frame alive; PUSH forces immediate
 * processing in the filtergraph.
 */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);  /* canvas must have been allocated */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/**
 * Render a new AVSubtitle onto the sub2video canvas and push it downstream.
 * When sub is non-NULL its display window is rescaled from AV_TIME_BASE_Q
 * into the stream time base; when sub is NULL (heartbeat/flush path) a blank
 * canvas is emitted starting at the previous end_pts.
 * NOTE(review): several original lines (branches, braces) are elided in this
 * view of the file.
 */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* convert the subtitle display window to stream time base */
    pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                       AV_TIME_BASE_Q, ist->st->time_base);
    end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    /* NULL-subtitle path: re-emit a blank canvas from the previous end */
    pts = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/**
 * Keep sub2video streams flowing while other streams of the same file are
 * being read, so filters waiting on a subtitle input do not stall.
 * NOTE(review): loop-skip statements (continue) appear elided in this view.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* refresh the canvas if the current subtitle window has expired */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* only re-push when some filter actually failed a frame request */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/**
 * End-of-stream handling for a sub2video input: emit a final blank canvas if
 * a subtitle window was still open, then send EOF (NULL frame) to every
 * buffersrc fed by this stream.
 */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/**
 * Async-signal-safe terminal restoration: put the tty back into the state
 * saved in oldtty.  The empty QUIET log call presumably flushes/terminates
 * logging output — confirm against av_log semantics.
 * NOTE(review): guard lines (e.g. a restore_tty check) are elided in this view.
 */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between the signal handler and the main
   loop.  NOTE(review): plain volatile int is used rather than sig_atomic_t;
   adequate in practice on the supported platforms, but worth confirming. */
static volatile int received_sigterm = 0;    /* last terminating signal number */
static volatile int received_nb_signals = 0; /* how many signals arrived */
static volatile int transcode_init_done = 0; /* set once transcode setup finished */
static volatile int ffmpeg_exited = 0;       /* set when cleanup has completed */
static int main_return_code = 0;             /* process exit status accumulator */
/* Signal handler: record the signal and count arrivals; after more than 3
   signals, write a message directly to fd 2 (async-signal-safe, unlike
   stdio) and hard-exit.  NOTE(review): the return type line and the exit
   call are elided in this view. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
   POSIX-style signal path used elsewhere (SIGINT/SIGTERM via
   sigterm_handler).  NOTE(review): switch header, returns and sleep inside
   the wait loop are elided in this view. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* NOTE(review): this appears to be the body of term_init() — the function
       header is elided in this view.  It switches the controlling tty into a
       raw-ish mode (no echo, no canonical input) so single keypresses can be
       read, then installs the signal handlers. */
    if (tcgetattr (0, &tty) == 0) {
        /* disable input translation/flow-control so keys arrive unmodified */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        /* non-canonical, no echo: read keys immediately */
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tcsetattr (0, TCSANOW, &tty);
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    signal(SIGXCPU, sigterm_handler);
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* POSIX path uses select() with (presumably) a zero timeout; the Windows
   path peeks the stdin pipe via PeekNamedPipe.  NOTE(review): most of this
   function's body is elided in this view. */
static int read_key(void)
    n = select(1, &rfds, NULL, NULL, &tv);
# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;  /* cached stdin handle (Windows) */
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails when stdin is a pipe rather than a console */
        is_pipe = !GetConsoleMode(input_handle, &dw);

    /* When running under a GUI, you will end here. */
    if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
        // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: ask libavformat to abort blocking I/O once more
   signals have been received than the init phase accounts for (one signal is
   tolerated while transcode_init_done is set). */
static int decode_interrupt_cb(void *ctx)
    return received_nb_signals > transcode_init_done;

/* Interrupt callback handed to every AVFormatContext we open. */
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/**
 * Global teardown, registered to run at program exit: frees filtergraphs,
 * output files/streams, input files/streams, the vstats file and the global
 * tables, then reports how the run ended.
 * NOTE(review): various guard lines and closing braces are elided in this
 * view of the file.
 */
static void ffmpeg_cleanup(int ret)
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* free every filtergraph and its endpoint descriptors */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close output files (closing the underlying file only for !NOFILE muxers) */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);
        av_freep(&output_files[i]);
    /* free per-output-stream state */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;

        /* walk and close the bitstream filter chain */
        bsfc = ost->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_parser_close(ost->parser);
        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);
        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;
        av_dict_free(&ost->sws_dict);
        avcodec_free_context(&ost->enc_ctx);
        av_freep(&output_streams[i]);
    free_input_threads();
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        avcodec_free_context(&ist->dec_ctx);
        av_freep(&input_streams[i]);
    /* fclose flushes buffered vstats output; report if that fails */
    if (fclose(vstats_file))
        av_log(NULL, AV_LOG_ERROR,
               "Error closing vstats file, loss of information possible: %s\n",
               av_err2str(AVERROR(errno)));
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/**
 * Remove from dictionary *a every key that also appears in dictionary b
 * (case-sensitive match).  Used to strip already-consumed options.
 */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/**
 * Abort with a fatal log if dictionary m still contains any entry, i.e. an
 * option that no component consumed.
 */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper used when an experimental codec is selected without the
   required strictness setting.  NOTE(review): the body is elided in this
   view of the file. */
static void abort_codec_experimental(AVCodec *c, int encoder)
/**
 * -benchmark_all support: when fmt is non-NULL, log the CPU time elapsed
 * since the previous call (t - current_time) tagged with the formatted
 * label.  NOTE(review): va_list setup and the current_time update are elided
 * in this view.
 */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
        vsnprintf(buf, sizeof(buf), fmt, va);
        av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/**
 * Mark every output stream finished: the stream that triggered the close
 * gets this_stream OR-ed into its finished flags, all others get others.
 */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/**
 * Final packet path to the muxer: copies encoder extradata to the stream
 * codec context on first use, applies sync-method pts/dts clearing, enforces
 * -frames limits for non-video, extracts video quality/error side data,
 * applies bitstream filters, repairs invalid or non-monotonic DTS, updates
 * accounting, and hands the packet to av_interleaved_write_frame().
 * NOTE(review): several guard lines, returns and closing braces are elided
 * in this view of the file.
 */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* lazily propagate encoder extradata to the muxer-visible codec context */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
    /* -vsync drop / negative -async: discard timestamps entirely */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* quality/error stats attached by the encoder as packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
        /* CFR output: force the packet duration to exactly one frame */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
    av_packet_split_side_data(pkt);

    if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
        print_error("", ret);
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; rebuild dts from a median-style guess */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* sum minus min minus max == the median of the three values */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* strict muxers require strictly increasing dts */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    /* byte/packet accounting for the final report */
    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
    ret = av_interleaved_write_frame(s, pkt);
        /* on mux failure, finish all streams so the run can wind down */
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/**
 * Mark one output stream's encoder as finished and, when applicable, shrink
 * the owning file's recording_time to the point actually reached so the
 * whole file can stop consistently.
 */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/**
 * Check whether this stream has passed the output file's -t recording limit;
 * close the stream when it has.  NOTE(review): the return statements are
 * elided in this view — presumably 0 when closed, nonzero otherwise.
 */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/**
 * Encode one audio frame and mux the resulting packet(s): fixes up the frame
 * pts from sync_opts when needed, advances sync_opts by the sample count,
 * encodes via avcodec_encode_audio2(), rescales timestamps to the stream
 * time base and calls write_frame().
 * NOTE(review): several lines (packet init details, got_packet guards,
 * braces) are elided in this view of the file.
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))
    /* without a valid pts (or with -async < 0) continue from sync_opts */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* encoder time base -> muxer stream time base */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

        av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        write_frame(s, &pkt, ost);
/**
 * Encode one AVSubtitle and mux it.  DVB subtitles are encoded twice (one
 * packet draws, one clears), hence the nb-iteration loop.  Timestamps are
 * shifted by the output file start time so -ss/-t behave correctly, and
 * start_display_time is folded into pts because the spec requires it to be 0.
 * NOTE(review): some guard lines, returns and braces are elided in this view.
 */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;  /* fixed scratch buffer size */
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        /* lazily allocate the shared scratch buffer */
        subtitle_out = av_malloc(subtitle_out_max_size);
        av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;  /* restored after encoding */

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* 90 kHz ticks per millisecond of display time */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;
        write_frame(s, &pkt, ost);
/**
 * Video encode/output core: decides how many frames to emit for the next
 * filtered picture according to the selected -vsync mode (duplicating or
 * dropping frames to hit the target rate), handles forced keyframes
 * (-force_key_frames list, expression, or "source"), encodes via
 * avcodec_encode_video2() (or writes raw pictures on the deprecated
 * AVFMT_RAWPICTURE path) and muxes through write_frame().
 * NOTE(review): many original lines (case labels, braces, returns, some
 * log-guard conditions) are elided in this view of the file.
 */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* derive the nominal frame duration (in encoder ticks) from the filter
       output frame rate, clamped by the explicit output rate when known */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
        /* flush path: predict dup count from the recent history (median) */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);

    delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
    delta = delta0 + duration;

    /* by default, we output a single frame */
    nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;
    ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* frame arrived earlier than expected: clip it forward */
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ost->sync_opts;

    switch (format_video_sync) {
        if (ost->frame_number == 0 && delta0 >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            ost->sync_opts = lrint(sync_ipts);
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);
    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    /* honor -frames for this stream */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* shift the dup history and record the current count */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* the first nb0_frames iterations re-emit the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)
#if FF_API_LAVF_FMT_RAWPICTURE
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               API (note: closing text of this comment elided in this view) */
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;  /* let the encoder choose unless forced below */

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* forced keyframe sources, in priority order:
               explicit pts list, expression, or copying source keyframes */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
            /* non-delaying encoders may leave pts unset: use sync_opts */
            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt.pts = ost->sync_opts;

            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
            frame_size = pkt.size;
            write_frame(s, &pkt, ost);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;

    if (vstats_filename && frame_size)
        do_video_stats(ost, frame_size);

    /* keep a reference to this picture for possible duplication next call */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
1221 static double psnr(double d)
1223 return -10.0 * log10(d);
/**
 * Append one line of per-frame statistics (quality, PSNR when available,
 * frame size, time, bitrates, picture type) to the -vstats file, opening it
 * on first use.  NOTE(review): some lines (enc assignment, error handling
 * for fopen, ti1 clamping) are elided in this view.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/**
 * Mark an output stream — and, in the visible loop, every stream of its
 * output file — as completely finished (encoder and muxer).
 * NOTE(review): the condition guarding the file-wide loop is elided in this
 * view.
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1278 * Get and encode new output from any of the filtergraphs, without causing
1281 * @return 0 for success, <0 for severe errors
1283 static int reap_filters(int flush)
1285 AVFrame *filtered_frame = NULL;
1288 /* Reap all buffers present in the buffer sinks */
1289 for (i = 0; i < nb_output_streams; i++) {
1290 OutputStream *ost = output_streams[i];
1291 OutputFile *of = output_files[ost->file_index];
1292 AVFilterContext *filter;
1293 AVCodecContext *enc = ost->enc_ctx;
1298 filter = ost->filter->filter;
/* Lazily allocate the per-stream scratch frame used to pull sink output. */
1300 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1301 return AVERROR(ENOMEM);
1303 filtered_frame = ost->filtered_frame;
1306 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only drain frames already queued in the sink, do not pull
 * more data through the graph. */
1307 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1308 AV_BUFFERSINK_FLAG_NO_REQUEST);
1310 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1311 av_log(NULL, AV_LOG_WARNING,
1312 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On EOF during a flush, send a NULL frame so the video encoder drains. */
1313 } else if (flush && ret == AVERROR_EOF) {
1314 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1315 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1319 if (ost->finished) {
1320 av_frame_unref(filtered_frame);
/* Rescale the frame pts into the encoder time base; float_pts keeps extra
 * fractional bits of precision for the fps/sync code in do_video_out(). */
1323 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1324 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1325 AVRational tb = enc->time_base;
1326 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1328 tb.den <<= extra_bits;
1330 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1331 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1332 float_pts /= 1 << extra_bits;
1333 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1334 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1336 filtered_frame->pts =
1337 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1338 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1340 //if (ost->source_index >= 0)
1341 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* Dispatch the reaped frame to the matching per-media-type encoder path. */
1343 switch (filter->inputs[0]->type) {
1344 case AVMEDIA_TYPE_VIDEO:
1345 if (!ost->frame_aspect_ratio.num)
1346 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1349 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1350 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1352 enc->time_base.num, enc->time_base.den);
1355 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1357 case AVMEDIA_TYPE_AUDIO:
/* The filtergraph changed audio parameters mid-stream but the encoder
 * cannot follow (no AV_CODEC_CAP_PARAM_CHANGE): report and bail out. */
1358 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1359 enc->channels != av_frame_get_channels(filtered_frame)) {
1360 av_log(NULL, AV_LOG_ERROR,
1361 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1364 do_audio_out(of->ctx, ost, filtered_frame);
1367 // TODO support subtitle filters
1371 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then detailed per-input/per-output stream packet
 * and frame counts at VERBOSE level. 'total_size' is the muxed output size
 * in bytes (<=0 when unknown). */
1378 static void print_final_stats(int64_t total_size)
1380 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381 uint64_t subtitle_size = 0;
1382 uint64_t data_size = 0;
1383 float percent = -1.0;
/* Accumulate written bytes per media type across all output streams. */
1387 for (i = 0; i < nb_output_streams; i++) {
1388 OutputStream *ost = output_streams[i];
1389 switch (ost->enc_ctx->codec_type) {
1390 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393 default: other_size += ost->data_size; break;
1395 extra_size += ost->enc_ctx->extradata_size;
1396 data_size += ost->data_size;
/* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 constant; presumably both should be AV_CODEC_FLAG_* --
 * the values are expected to be identical, but confirm before changing. */
1397 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1398 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead as a percentage of the payload, only when sizes are sane. */
1402 if (data_size && total_size>0 && total_size >= data_size)
1403 percent = 100.0 * (total_size - data_size) / data_size;
1405 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406 video_size / 1024.0,
1407 audio_size / 1024.0,
1408 subtitle_size / 1024.0,
1409 other_size / 1024.0,
1410 extra_size / 1024.0);
1412 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1414 av_log(NULL, AV_LOG_INFO, "unknown");
1415 av_log(NULL, AV_LOG_INFO, "\n");
1417 /* print verbose per-stream stats */
1418 for (i = 0; i < nb_input_files; i++) {
1419 InputFile *f = input_files[i];
1420 uint64_t total_packets = 0, total_size = 0;
1422 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423 i, f->ctx->filename);
1425 for (j = 0; j < f->nb_streams; j++) {
1426 InputStream *ist = input_streams[f->ist_index + j];
1427 enum AVMediaType type = ist->dec_ctx->codec_type;
1429 total_size += ist->data_size;
1430 total_packets += ist->nb_packets;
1432 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433 i, j, media_type_string(type));
1434 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435 ist->nb_packets, ist->data_size);
1437 if (ist->decoding_needed) {
1438 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439 ist->frames_decoded);
1440 if (type == AVMEDIA_TYPE_AUDIO)
1441 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442 av_log(NULL, AV_LOG_VERBOSE, "; ");
1445 av_log(NULL, AV_LOG_VERBOSE, "\n");
1448 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449 total_packets, total_size);
/* Same per-stream breakdown, now for the output side (muxed packets). */
1452 for (i = 0; i < nb_output_files; i++) {
1453 OutputFile *of = output_files[i];
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457 i, of->ctx->filename);
1459 for (j = 0; j < of->ctx->nb_streams; j++) {
1460 OutputStream *ost = output_streams[of->ost_index + j];
1461 enum AVMediaType type = ost->enc_ctx->codec_type;
1463 total_size += ost->data_size;
1464 total_packets += ost->packets_written;
1466 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 if (ost->encoding_needed) {
1469 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470 ost->frames_encoded);
1471 if (type == AVMEDIA_TYPE_AUDIO)
1472 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477 ost->packets_written, ost->data_size);
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483 total_packets, total_size);
/* Warn when literally nothing was encoded into any output. */
1485 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1488 av_log(NULL, AV_LOG_WARNING, "\n");
1490 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic (and final) status line: builds the classic one-line progress
 * report in 'buf' for stderr and a machine-readable key=value version in
 * 'buf_script' for the -progress output. Rate-limited to one update per
 * 500ms unless this is the last report. On the last report it also calls
 * print_final_stats(). */
1495 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1498 AVBPrint buf_script;
1500 AVFormatContext *oc;
1502 AVCodecContext *enc;
1503 int frame_number, vid, i;
1506 int64_t pts = INT64_MIN + 1;
/* static: persists between calls for rate limiting / QP statistics. */
1507 static int64_t last_time = -1;
1508 static int qp_histogram[52];
1509 int hours, mins, secs, us;
1513 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle intermediate reports to at most one every 500ms. */
1516 if (!is_last_report) {
1517 if (last_time == -1) {
1518 last_time = cur_time;
1521 if ((cur_time - last_time) < 500000)
1523 last_time = cur_time;
1526 t = (cur_time-timer_start) / 1000000.0;
1529 oc = output_files[0]->ctx;
1531 total_size = avio_size(oc->pb);
1532 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1533 total_size = avio_tell(oc->pb);
1537 av_bprint_init(&buf_script, 0, 1);
1538 for (i = 0; i < nb_output_streams; i++) {
1540 ost = output_streams[i];
1542 if (!ost->stream_copy)
1543 q = ost->quality / (float) FF_QP2LAMBDA;
/* Additional video streams beyond the first only contribute a "q=" field. */
1545 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1547 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1548 ost->file_index, ost->index, q);
/* First video stream drives frame/fps/q and the optional PSNR output. */
1550 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553 frame_number = ost->frame_number;
1554 fps = t > 1 ? frame_number / t : 0;
1555 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1556 frame_number, fps < 9.95, fps, q);
1557 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1558 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1559 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560 ost->file_index, ost->index, q);
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp_debug mode: bucket quantizers and print the log2 histogram. */
1566 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1568 for (j = 0; j < 32; j++)
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1572 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1574 double error, error_sum = 0;
1575 double scale, scale_sum = 0;
1577 char type[3] = { 'Y','U','V' };
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1579 for (j = 0; j < 3; j++) {
/* Last report: global accumulated error over all frames; otherwise
 * the per-frame error of the most recent frame. */
1580 if (is_last_report) {
1581 error = enc->error[j];
1582 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1584 error = ost->error[j];
1585 scale = enc->width * enc->height * 255.0 * 255.0;
1591 p = psnr(error / scale);
1592 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1593 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1594 ost->file_index, ost->index, type[j] | 32, p);
1596 p = psnr(error_sum / scale_sum);
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1598 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1599 ost->file_index, ost->index, p);
1603 /* compute min output value */
1604 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1605 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1606 ost->st->time_base, AV_TIME_BASE_Q));
1608 nb_frames_drop += ost->last_dropped;
/* Split the output timestamp into h:m:s.centiseconds for display. */
1611 secs = FFABS(pts) / AV_TIME_BASE;
1612 us = FFABS(pts) % AV_TIME_BASE;
1618 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1619 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1621 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1623 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624 "size=%8.0fkB time=", total_size / 1024.0);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629 (100 * us) / AV_TIME_BASE);
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633 av_bprintf(&buf_script, "bitrate=N/A\n");
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1639 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643 hours, mins, secs, us);
1645 if (nb_frames_dup || nb_frames_drop)
1646 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647 nb_frames_dup, nb_frames_drop);
1648 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1653 av_bprintf(&buf_script, "speed=N/A\n");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1656 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' overwrites the line in place for intermediate reports; '\n' only
 * for the final one. */
1659 if (print_stats || is_last_report) {
1660 const char end = is_last_report ? '\n' : '\r';
1661 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662 fprintf(stderr, "%s %c", buf, end);
1664 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* Emit the key=value report to the -progress destination, if any. */
1669 if (progress_avio) {
1670 av_bprintf(&buf_script, "progress=%s\n",
1671 is_last_report ? "end" : "continue");
1672 avio_write(progress_avio, buf_script.str,
1673 FFMIN(buf_script.len, buf_script.size - 1));
1674 avio_flush(progress_avio);
1675 av_bprint_finalize(&buf_script, NULL);
1676 if (is_last_report) {
1677 if ((ret = avio_closep(&progress_avio)) < 0)
1678 av_log(NULL, AV_LOG_ERROR,
1679 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1684 print_final_stats(total_size);
/* Drain all encoders at end of input: feed each encoder NULL frames until it
 * stops producing packets, writing every flushed packet to its muxer.
 * Streams that are stream-copied, or whose encoder needs no flushing, are
 * skipped. */
1687 static void flush_encoders(void)
1691 for (i = 0; i < nb_output_streams; i++) {
1692 OutputStream *ost = output_streams[i];
1693 AVCodecContext *enc = ost->enc_ctx;
1694 AVFormatContext *os = output_files[ost->file_index]->ctx;
1695 int stop_encoding = 0;
1697 if (!ost->encoding_needed)
/* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush. */
1700 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1702 #if FF_API_LAVF_FMT_RAWPICTURE
1703 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Pick the media-appropriate encode entry point for the flush loop. */
1708 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1711 switch (enc->codec_type) {
1712 case AVMEDIA_TYPE_AUDIO:
1713 encode = avcodec_encode_audio2;
1716 case AVMEDIA_TYPE_VIDEO:
1717 encode = avcodec_encode_video2;
1728 av_init_packet(&pkt);
1732 update_benchmark(NULL);
/* NULL frame signals the encoder to emit any buffered packets. */
1733 ret = encode(enc, &pkt, NULL, &got_packet);
1734 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1736 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Keep two-pass log files up to date while draining. */
1741 if (ost->logfile && enc->stats_out) {
1742 fprintf(ost->logfile, "%s", enc->stats_out);
1748 if (ost->finished & MUXER_FINISHED) {
1749 av_packet_unref(&pkt);
1752 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1753 pkt_size = pkt.size;
1754 write_frame(os, &pkt, ost);
1755 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1756 do_video_stats(ost, pkt_size);
1767 * Check whether a packet from ist should be written into ost at this time
1769 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1771 OutputFile *of = output_files[ost->file_index];
1772 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Reject packets that do not come from this output stream's source. */
1774 if (ost->source_index != ist_index)
/* Reject packets that precede the output file's -ss start time. */
1780 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescales
 * pts/dts/duration into the output time base, honours -ss/-t windows,
 * optionally runs the bitstream parser for legacy codecs, and hands the
 * resulting packet to write_frame(). */
1786 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1788 OutputFile *of = output_files[ost->file_index];
1789 InputFile *f = input_files [ist->file_index];
1790 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1791 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1797 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1798 !ost->copy_initial_nonkeyframes)
/* Drop packets that lie before the effective copy start point. */
1801 if (!ost->frame_number && !ost->copy_prior_start) {
1802 int64_t comp_start = start_time;
1803 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1804 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1805 if (pkt->pts == AV_NOPTS_VALUE ?
1806 ist->pts < comp_start :
1807 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Past the output -t/-to recording window: close the stream. */
1811 if (of->recording_time != INT64_MAX &&
1812 ist->pts >= of->recording_time + start_time) {
1813 close_output_stream(ost);
/* Same check against the input file's own recording window. */
1817 if (f->recording_time != INT64_MAX) {
1818 start_time = f->ctx->start_time;
1819 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1820 start_time += f->start_time;
1821 if (ist->pts >= f->recording_time + start_time) {
1822 close_output_stream(ost);
1827 /* force the input stream PTS */
1828 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1831 if (pkt->pts != AV_NOPTS_VALUE)
1832 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1834 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: fall back to the demuxer-tracked input dts estimate. */
1836 if (pkt->dts == AV_NOPTS_VALUE)
1837 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1839 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1840 opkt.dts -= ost_tb_start_time;
/* For audio, rescale via the sample-accurate delta helper to avoid
 * cumulative rounding drift in the timestamps. */
1842 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1843 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1845 duration = ist->dec_ctx->frame_size;
1846 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1847 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1848 ost->st->time_base) - ost_tb_start_time;
1851 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1852 opkt.flags = pkt->flags;
1853 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1854 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1855 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1856 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1857 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1859 int ret = av_parser_change(ost->parser, ost->st->codec,
1860 &opkt.data, &opkt.size,
1861 pkt->data, pkt->size,
1862 pkt->flags & AV_PKT_FLAG_KEY);
1864 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser allocated a new payload: wrap it so the packet owns it. */
1869 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1874 opkt.data = pkt->data;
1875 opkt.size = pkt->size;
1877 av_copy_packet_side_data(&opkt, pkt);
1879 #if FF_API_LAVF_FMT_RAWPICTURE
1880 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1881 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1882 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1883 /* store AVPicture in AVPacket, as expected by the output format */
1884 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1886 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1890 opkt.data = (uint8_t *)&pict;
1891 opkt.size = sizeof(AVPicture);
1892 opkt.flags |= AV_PKT_FLAG_KEY;
1896 write_frame(of->ctx, &opkt, ost);
/* If the decoder reported no channel layout, pick the default layout for its
 * channel count (when within the user-set guess limit) and warn about the
 * guess. Returns non-zero on success per the visible control flow. */
1899 int guess_input_channel_layout(InputStream *ist)
1901 AVCodecContext *dec = ist->dec_ctx;
1903 if (!dec->channel_layout) {
1904 char layout_name[256];
/* Refuse to guess above -guess_layout_max channels. */
1906 if (dec->channels > ist->guess_layout_max)
1908 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1909 if (!dec->channel_layout)
1911 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1912 dec->channels, dec->channel_layout);
1913 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1914 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Update the global decode success/error counters after a decode call and,
 * when -xerror is set, abort on decode errors or corrupt decoded frames.
 * 'ist' may be NULL (e.g. for subtitles), skipping the corruption check. */
1919 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1921 if (*got_output || ret<0)
1922 decode_error_stat[ret<0] ++;
1924 if (ret < 0 && exit_on_error)
1927 if (exit_on_error && *got_output && ist) {
1928 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1929 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, fix up timestamps, reconfigure filtergraphs when
 * audio parameters change mid-stream, and push the decoded frame into every
 * filter input attached to this stream. Returns <0 on error, otherwise the
 * decoder's return value. */
1935 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1937 AVFrame *decoded_frame, *f;
1938 AVCodecContext *avctx = ist->dec_ctx;
1939 int i, ret, err = 0, resample_changed;
1940 AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter scratch frames. */
1942 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1943 return AVERROR(ENOMEM);
1944 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1945 return AVERROR(ENOMEM);
1946 decoded_frame = ist->decoded_frame;
1948 update_benchmark(NULL);
1949 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1950 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* Guard against decoders reporting a nonsensical sample rate. */
1952 if (ret >= 0 && avctx->sample_rate <= 0) {
1953 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1954 ret = AVERROR_INVALIDDATA;
1957 check_decode_result(ist, got_output, ret);
1959 if (!*got_output || ret < 0)
1962 ist->samples_decoded += decoded_frame->nb_samples;
1963 ist->frames_decoded++;
1966 /* increment next_dts to use for the case where the input stream does not
1967 have timestamps or there are multiple frames in the packet */
1968 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1970 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in format/channels/layout/rate versus what the
 * filtergraphs were configured for. */
1974 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1975 ist->resample_channels != avctx->channels ||
1976 ist->resample_channel_layout != decoded_frame->channel_layout ||
1977 ist->resample_sample_rate != decoded_frame->sample_rate;
1978 if (resample_changed) {
1979 char layout1[64], layout2[64];
1981 if (!guess_input_channel_layout(ist)) {
1982 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1983 "layout for Input Stream #%d.%d\n", ist->file_index,
1987 decoded_frame->channel_layout = avctx->channel_layout;
1989 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1990 ist->resample_channel_layout);
1991 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1992 decoded_frame->channel_layout);
1994 av_log(NULL, AV_LOG_INFO,
1995 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1996 ist->file_index, ist->st->index,
1997 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1998 ist->resample_channels, layout1,
1999 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2000 avctx->channels, layout2);
/* Remember the new parameters, then rebuild every filtergraph fed by
 * this input stream. */
2002 ist->resample_sample_fmt = decoded_frame->format;
2003 ist->resample_sample_rate = decoded_frame->sample_rate;
2004 ist->resample_channel_layout = decoded_frame->channel_layout;
2005 ist->resample_channels = avctx->channels;
2007 for (i = 0; i < nb_filtergraphs; i++)
2008 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2009 FilterGraph *fg = filtergraphs[i];
2010 if (configure_filtergraph(fg) < 0) {
2011 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2017 /* if the decoder provides a pts, use it instead of the last packet pts.
2018 the decoder could be delaying output by a packet or more. */
2019 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2020 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2021 decoded_frame_tb = avctx->time_base;
2022 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2023 decoded_frame->pts = decoded_frame->pkt_pts;
2024 decoded_frame_tb = ist->st->time_base;
2025 } else if (pkt->pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = pkt->pts;
2027 decoded_frame_tb = ist->st->time_base;
2029 decoded_frame->pts = ist->dts;
2030 decoded_frame_tb = AV_TIME_BASE_Q;
/* Consume the packet pts so it is not reused for a later frame. */
2032 pkt->pts = AV_NOPTS_VALUE;
2033 if (decoded_frame->pts != AV_NOPTS_VALUE)
2034 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2035 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2036 (AVRational){1, avctx->sample_rate});
2037 ist->nb_samples = decoded_frame->nb_samples;
/* Feed all filter inputs; all but the last get a ref'd copy so the
 * original frame stays valid for the final (move-style) push. */
2038 for (i = 0; i < ist->nb_filters; i++) {
2039 if (i < ist->nb_filters - 1) {
2040 f = ist->filter_frame;
2041 err = av_frame_ref(f, decoded_frame);
2046 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2047 AV_BUFFERSRC_FLAG_PUSH);
2048 if (err == AVERROR_EOF)
2049 err = 0; /* ignore */
2053 decoded_frame->pts = AV_NOPTS_VALUE;
2055 av_frame_unref(ist->filter_frame);
2056 av_frame_unref(decoded_frame);
2057 return err < 0 ? err : ret;
/* Decode one video packet, reconcile has_b_frames with the demuxer, derive
 * the best-effort timestamp, reconfigure filtergraphs on frame size/format
 * changes, and push the decoded frame into every attached filter input.
 * Returns <0 on error, otherwise the decoder's return value. */
2060 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2062 AVFrame *decoded_frame, *f;
2063 int i, ret = 0, err = 0, resample_changed;
2064 int64_t best_effort_timestamp;
2065 AVRational *frame_sample_aspect;
/* Lazily allocate the reusable decode/filter scratch frames. */
2067 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 decoded_frame = ist->decoded_frame;
2072 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2074 update_benchmark(NULL);
2075 ret = avcodec_decode_video2(ist->dec_ctx,
2076 decoded_frame, got_output, pkt);
2077 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2079 // The following line may be required in some cases where there is no parser
2080 // or the parser does not set has_b_frames correctly
2081 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2082 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2083 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2085 av_log(ist->dec_ctx, AV_LOG_WARNING,
2086 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2087 "If you want to help, upload a sample "
2088 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2089 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2090 ist->dec_ctx->has_b_frames,
2091 ist->st->codec->has_b_frames);
2094 check_decode_result(ist, got_output, ret);
/* Purely diagnostic: note when the decoded frame disagrees with the
 * decoder context's advertised geometry/format. */
2096 if (*got_output && ret >= 0) {
2097 if (ist->dec_ctx->width != decoded_frame->width ||
2098 ist->dec_ctx->height != decoded_frame->height ||
2099 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2100 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2101 decoded_frame->width,
2102 decoded_frame->height,
2103 decoded_frame->format,
2104 ist->dec_ctx->width,
2105 ist->dec_ctx->height,
2106 ist->dec_ctx->pix_fmt);
2110 if (!*got_output || ret < 0)
/* Honour an explicit -top user override of field order. */
2113 if(ist->top_field_first>=0)
2114 decoded_frame->top_field_first = ist->top_field_first;
2116 ist->frames_decoded++;
/* Download the frame from hardware surfaces when a hwaccel is active. */
2118 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2119 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2123 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2125 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2126 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2127 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2129 if (ts != AV_NOPTS_VALUE)
2130 ist->next_pts = ist->pts = ts;
2134 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2135 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2136 ist->st->index, av_ts2str(decoded_frame->pts),
2137 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2138 best_effort_timestamp,
2139 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2140 decoded_frame->key_frame, decoded_frame->pict_type,
2141 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides whatever the decoder produced. */
2146 if (ist->st->sample_aspect_ratio.num)
2147 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect size/format changes versus the configured filtergraphs and
 * rebuild them (when reinit is enabled). */
2149 resample_changed = ist->resample_width != decoded_frame->width ||
2150 ist->resample_height != decoded_frame->height ||
2151 ist->resample_pix_fmt != decoded_frame->format;
2152 if (resample_changed) {
2153 av_log(NULL, AV_LOG_INFO,
2154 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2155 ist->file_index, ist->st->index,
2156 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2157 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2159 ist->resample_width = decoded_frame->width;
2160 ist->resample_height = decoded_frame->height;
2161 ist->resample_pix_fmt = decoded_frame->format;
2163 for (i = 0; i < nb_filtergraphs; i++) {
2164 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2165 configure_filtergraph(filtergraphs[i]) < 0) {
2166 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2172 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Feed all filter inputs; all but the last get a ref'd copy so the
 * original frame stays valid for the final push. */
2173 for (i = 0; i < ist->nb_filters; i++) {
2174 if (!frame_sample_aspect->num)
2175 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2177 if (i < ist->nb_filters - 1) {
2178 f = ist->filter_frame;
2179 err = av_frame_ref(f, decoded_frame);
2184 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2185 if (ret == AVERROR_EOF) {
2186 ret = 0; /* ignore */
2187 } else if (ret < 0) {
2188 av_log(NULL, AV_LOG_FATAL,
2189 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2195 av_frame_unref(ist->filter_frame);
2196 av_frame_unref(decoded_frame);
2197 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * display duration (-fix_sub_duration), update the sub2video overlay, and
 * send the subtitle to every output stream that encodes subtitles from this
 * input. */
2200 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2202 AVSubtitle subtitle;
2203 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2204 &subtitle, got_output, pkt);
2206 check_decode_result(NULL, got_output, ret);
2208 if (ret < 0 || !*got_output) {
2210 sub2video_flush(ist);
/* -fix_sub_duration: shorten the previous subtitle so it ends when this
 * one starts, working one subtitle behind via the prev_sub swap below. */
2214 if (ist->fix_sub_duration) {
2216 if (ist->prev_sub.got_output) {
2217 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2218 1000, AV_TIME_BASE);
2219 if (end < ist->prev_sub.subtitle.end_display_time) {
2220 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2221 "Subtitle duration reduced from %d to %d%s\n",
2222 ist->prev_sub.subtitle.end_display_time, end,
2223 end <= 0 ? ", dropping it" : "");
2224 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current decode state with the buffered previous subtitle, so the
 * (now fixed-up) previous one is what gets emitted this call. */
2227 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2228 FFSWAP(int, ret, ist->prev_sub.ret);
2229 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2237 sub2video_update(ist, &subtitle);
2239 if (!subtitle.num_rects)
2242 ist->frames_decoded++;
2244 for (i = 0; i < nb_output_streams; i++) {
2245 OutputStream *ost = output_streams[i];
2247 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2248 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2251 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2255 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this stream by pushing a
 * NULL frame into each buffer source. */
2259 static int send_filter_eof(InputStream *ist)
2262 for (i = 0; i < ist->nb_filters; i++) {
2263 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2271 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2277 if (!ist->saw_first_ts) {
2278 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2280 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2281 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2282 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2284 ist->saw_first_ts = 1;
2287 if (ist->next_dts == AV_NOPTS_VALUE)
2288 ist->next_dts = ist->dts;
2289 if (ist->next_pts == AV_NOPTS_VALUE)
2290 ist->next_pts = ist->pts;
2294 av_init_packet(&avpkt);
2302 if (pkt->dts != AV_NOPTS_VALUE) {
2303 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2304 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2305 ist->next_pts = ist->pts = ist->dts;
2308 // while we have more to decode or while the decoder did output something on EOF
2309 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2313 ist->pts = ist->next_pts;
2314 ist->dts = ist->next_dts;
2316 if (avpkt.size && avpkt.size != pkt->size &&
2317 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2318 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2319 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2320 ist->showed_multi_packet_warning = 1;
2323 switch (ist->dec_ctx->codec_type) {
2324 case AVMEDIA_TYPE_AUDIO:
2325 ret = decode_audio (ist, &avpkt, &got_output);
2327 case AVMEDIA_TYPE_VIDEO:
2328 ret = decode_video (ist, &avpkt, &got_output);
2329 if (avpkt.duration) {
2330 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2331 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2332 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2333 duration = ((int64_t)AV_TIME_BASE *
2334 ist->dec_ctx->framerate.den * ticks) /
2335 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2339 if(ist->dts != AV_NOPTS_VALUE && duration) {
2340 ist->next_dts += duration;
2342 ist->next_dts = AV_NOPTS_VALUE;
2345 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2347 case AVMEDIA_TYPE_SUBTITLE:
2348 ret = transcode_subtitles(ist, &avpkt, &got_output);
2355 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2356 ist->file_index, ist->st->index, av_err2str(ret));
2363 avpkt.pts= AV_NOPTS_VALUE;
2365 // touch data and size only if not EOF
2367 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2375 if (got_output && !pkt)
2379 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2380 /* except when looping we need to flush but not to send an EOF */
2381 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2382 int ret = send_filter_eof(ist);
2384 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2389 /* handle stream copy */
2390 if (!ist->decoding_needed) {
2391 ist->dts = ist->next_dts;
2392 switch (ist->dec_ctx->codec_type) {
2393 case AVMEDIA_TYPE_AUDIO:
2394 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2395 ist->dec_ctx->sample_rate;
2397 case AVMEDIA_TYPE_VIDEO:
2398 if (ist->framerate.num) {
2399 // TODO: Remove work-around for c99-to-c89 issue 7
2400 AVRational time_base_q = AV_TIME_BASE_Q;
2401 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2402 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2403 } else if (pkt->duration) {
2404 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2405 } else if(ist->dec_ctx->framerate.num != 0) {
2406 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2407 ist->next_dts += ((int64_t)AV_TIME_BASE *
2408 ist->dec_ctx->framerate.den * ticks) /
2409 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2413 ist->pts = ist->dts;
2414 ist->next_pts = ist->next_dts;
2416 for (i = 0; pkt && i < nb_output_streams; i++) {
2417 OutputStream *ost = output_streams[i];
2419 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2422 do_streamcopy(ist, ost, pkt);
/*
 * Print an SDP session description covering every RTP output muxer, either
 * to stdout or (when -sdp_file was given) to that file via avio.
 * NOTE(review): this listing is incomplete — the original lines between the
 * numbered lines below (declarations of i/j/sdp, error checks, closing
 * braces) are missing; do not treat gaps as dead code.
 */
2428 static void print_sdp(void)
2433 AVIOContext *sdp_pb;
2434 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the contexts whose output format is the "rtp" muxer; j counts them. */
2438 for (i = 0, j = 0; i < nb_output_files; i++) {
2439 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2440 avc[j] = output_files[i]->ctx;
2448 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file: dump the SDP to stdout ... */
2450 if (!sdp_filename) {
2451 printf("SDP:\n%s\n", sdp);
/* ... otherwise open the requested file for writing and emit it there. */
2454 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2455 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2457 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2458 avio_closep(&sdp_pb);
/* sdp_filename is freed here, so it is presumably heap-allocated by option parsing — confirm. */
2459 av_freep(&sdp_filename);
/*
 * Return the entry of the compile-time hwaccels[] table whose pixel format
 * matches pix_fmt. The table is terminated by an entry with a NULL name.
 * NOTE(review): the fall-through return for "no match" (presumably NULL) is
 * on lines missing from this listing.
 */
2467 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2470 for (i = 0; hwaccels[i].name; i++)
2471 if (hwaccels[i].pix_fmt == pix_fmt)
2472 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format whose hwaccel can be
 * initialized (subject to the user's -hwaccel selection). Software formats
 * are skipped by the AV_PIX_FMT_FLAG_HWACCEL check. The InputStream is
 * recovered from the codec context's opaque pointer (set in
 * init_input_stream). NOTE(review): several lines (continue statements,
 * the software-format fallback return, closing braces) are missing from
 * this listing.
 */
2476 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2478 InputStream *ist = s->opaque;
2479 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2482 for (p = pix_fmts; *p != -1; p++) {
2483 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2484 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are considered here. */
2486 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2489 hwaccel = get_hwaccel(*p);
/* Skip if another hwaccel is already active, or a different one was explicitly requested. */
2491 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2492 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2495 ret = hwaccel->init(s);
/* If this exact hwaccel was explicitly requested and init failed, that is fatal. */
2497 if (ist->hwaccel_id == hwaccel->id) {
2498 av_log(NULL, AV_LOG_FATAL,
2499 "%s hwaccel requested for input stream #%d:%d, "
2500 "but cannot be initialized.\n", hwaccel->name,
2501 ist->file_index, ist->st->index);
2502 return AV_PIX_FMT_NONE;
/* Success: remember which hwaccel is active and which pix_fmt it uses. */
2506 ist->active_hwaccel_id = hwaccel->id;
2507 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel's allocator when the frame uses the hwaccel pixel format,
 * otherwise fall back to libavcodec's default buffer allocation.
 */
2514 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2516 InputStream *ist = s->opaque;
2518 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2519 return ist->hwaccel_get_buffer(s, frame, flags);
2521 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream ist_index (if decoding is needed) and
 * reset its next_pts/next_dts. On failure a human-readable message is
 * written into error (error_len bytes) and a negative AVERROR is returned.
 * NOTE(review): several original lines (the "codec missing" check guarding
 * the snprintf, some closing braces, the final return) are missing from
 * this listing.
 */
2524 static int init_input_stream(int ist_index, char *error, int error_len)
2527 InputStream *ist = input_streams[ist_index];
2529 if (ist->decoding_needed) {
2530 AVCodec *codec = ist->dec;
/* No decoder was found for this stream's codec — report and bail out. */
2532 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2533 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2534 return AVERROR(EINVAL);
/* Hook our callbacks into the decoder context; opaque lets them find the InputStream. */
2537 ist->dec_ctx->opaque = ist;
2538 ist->dec_ctx->get_format = get_format;
2539 ist->dec_ctx->get_buffer2 = get_buffer;
2540 ist->dec_ctx->thread_safe_callbacks = 1;
2542 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles going to an output need end-display-time computation. */
2543 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2544 (ist->decoding_needed & DECODING_FOR_OST)) {
2545 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2546 if (ist->decoding_needed & DECODING_FOR_FILTER)
2547 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic threading unless the user chose a thread count. */
2550 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2551 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2552 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2553 if (ret == AVERROR_EXPERIMENTAL)
2554 abort_codec_experimental(codec, 0);
2556 snprintf(error, error_len,
2557 "Error while opening decoder for input stream "
2559 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options not consumed by avcodec_open2 are reported here. */
2562 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is processed. */
2565 ist->next_pts = AV_NOPTS_VALUE;
2566 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, or NULL when the
 * output has no direct input (e.g. fed from a complex filtergraph).
 * NOTE(review): the NULL-returning fallback line is missing from this listing.
 */
2571 static InputStream *get_input_stream(OutputStream *ost)
2573 if (ost->source_index >= 0)
2574 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values. FFDIFFSIGN returns -1/0/+1, which
 * avoids the overflow a plain subtraction of two int64_t could cause.
 */
2578 static int compare_int64(const void *a, const void *b)
2580 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/*
 * Open the encoder for an output stream (when encoding is needed), copy
 * encoder side data and timebase to the AVStream, and apply remaining
 * encoder options. On failure writes a message into error/error_len and
 * returns a negative AVERROR. NOTE(review): this listing is missing
 * several lines (the dec assignment after get_input_stream, some error
 * returns and closing braces); gaps are not dead code.
 */
2583 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2587 if (ost->encoding_needed) {
2588 AVCodec *codec = ost->enc;
2589 AVCodecContext *dec = NULL;
2592 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's ASS subtitle header to the encoder. */
2594 if (dec && dec->subtitle_header) {
2595 /* ASS code assumes this buffer is null terminated so add extra byte. */
2596 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2597 if (!ost->enc_ctx->subtitle_header)
2598 return AVERROR(ENOMEM);
2599 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2600 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic threading unless the user set a thread count. */
2602 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2603 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio with no explicit bitrate gets a 128 kb/s default. */
2604 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2606 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2607 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2608 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hand the filtergraph's hardware frames context to the encoder, if any. */
2610 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2611 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2612 if (!ost->enc_ctx->hw_frames_ctx)
2613 return AVERROR(ENOMEM);
2616 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2617 if (ret == AVERROR_EXPERIMENTAL)
2618 abort_codec_experimental(codec, 1);
2619 snprintf(error, error_len,
2620 "Error while opening encoder for output stream #%d:%d - "
2621 "maybe incorrect parameters such as bit_rate, rate, width or height",
2622 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit matching frames. */
2625 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2626 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2627 av_buffersink_set_frame_size(ost->filter->filter,
2628 ost->enc_ctx->frame_size);
2629 assert_avoptions(ost->encoder_opts);
2630 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2631 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2632 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the (now configured) encoder context into the muxer-visible st->codec. */
2634 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2636 av_log(NULL, AV_LOG_FATAL,
2637 "Error initializing the output stream codec context.\n");
/* Copy encoder-produced packet side data onto the output stream. */
2641 if (ost->enc_ctx->nb_coded_side_data) {
2644 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2645 sizeof(*ost->st->side_data));
2646 if (!ost->st->side_data)
2647 return AVERROR(ENOMEM);
2649 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2650 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2651 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2653 sd_dst->data = av_malloc(sd_src->size);
2655 return AVERROR(ENOMEM);
2656 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2657 sd_dst->size = sd_src->size;
2658 sd_dst->type = sd_src->type;
2659 ost->st->nb_side_data++;
2663 // copy timebase while removing common factors
2664 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2665 ost->st->codec->codec= ost->enc_ctx->codec;
/* Non-encoding path (stream copy): only apply the user's codec options ... */
2667 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2669 av_log(NULL, AV_LOG_FATAL,
2670 "Error setting up codec context options.\n");
2673 // copy timebase while removing common factors
2674 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * Parse the -force_key_frames argument (comma-separated timestamps, plus
 * the "chapters[+offset]" shorthand) into a sorted array of keyframe PTS
 * values in avctx->time_base units, stored on ost->forced_kf_pts /
 * ost->forced_kf_count. Exits fatally on allocation failure.
 * NOTE(review): some lines (the comma-counting loop body, size = n
 * initialization, p advancement, closing braces) are missing from this
 * listing.
 */
2680 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2681 AVCodecContext *avctx)
2684 int n = 1, i, size, index = 0;
/* Count commas to size the pts array (n entries for n-1 commas). */
2687 for (p = kf; *p; p++)
2691 pts = av_malloc_array(size, sizeof(*pts));
2693 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Walk the comma-separated list; next points past the current token. */
2698 for (i = 0; i < n; i++) {
2699 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one forced keyframe per chapter start. */
2704 if (!memcmp(p, "chapters", 8)) {
2706 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1), guarding against int overflow. */
2709 if (avf->nb_chapters > INT_MAX - size ||
2710 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2712 av_log(NULL, AV_LOG_FATAL,
2713 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", converted into avctx time base. */
2716 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2717 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2719 for (j = 0; j < avf->nb_chapters; j++) {
2720 AVChapter *c = avf->chapters[j];
2721 av_assert1(index < size);
2722 pts[index++] = av_rescale_q(c->start, c->time_base,
2723 avctx->time_base) + t;
/* Plain timestamp token. */
2728 t = parse_time_or_die("force_key_frames", p, 1);
2729 av_assert1(index < size);
2730 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* All slots must have been filled; sort so lookups can scan in order. */
2737 av_assert0(index == size);
2738 qsort(pts, size, sizeof(*pts), compare_int64);
2739 ost->forced_kf_count = size;
2740 ost->forced_kf_pts = pts;
/*
 * Warn (once per stream) when a packet arrives for a stream that appeared
 * after the initial probe. nb_streams_warn tracks the highest stream index
 * already reported so earlier streams are not re-warned.
 */
2743 static void report_new_stream(int input_index, AVPacket *pkt)
2745 InputFile *file = input_files[input_index];
2746 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream index (or it was known from the start). */
2748 if (pkt->stream_index < file->nb_streams_warn)
2750 av_log(file->ctx, AV_LOG_WARNING,
2751 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2752 av_get_media_type_string(st->codec->codec_type),
2753 input_index, pkt->stream_index,
2754 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the warning watermark so each new stream is reported only once. */
2755 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Write an "encoder" metadata tag on the output stream identifying the
 * encoder used (full LIBAVCODEC_IDENT in normal mode, a short "Lavc"
 * prefix in bitexact mode). An existing user-provided tag wins.
 * NOTE(review): a few lines (null checks around e/o, return statements)
 * are missing from this listing.
 */
2758 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2760 AVDictionaryEntry *e;
2762 uint8_t *encoder_string;
2763 int encoder_string_len;
2764 int format_flags = 0;
2765 int codec_flags = 0;
/* Respect a user-supplied encoder tag. */
2767 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags to learn whether bitexact muxing was requested. */
2770 e = av_dict_get(of->opts, "fflags", NULL, 0);
2772 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2775 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Likewise for the per-encoder -flags option. */
2777 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2779 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2782 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* " " separator + NUL are covered by sizeof(LIBAVCODEC_IDENT) + 2. */
2785 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2786 encoder_string = av_mallocz(encoder_string_len);
2787 if (!encoder_string)
/* Full version string only when neither mux nor codec is in bitexact mode. */
2790 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2791 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2793 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2794 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL transfers ownership of encoder_string to the dict. */
2795 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2796 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup before the main transcode loop: resolve filtergraph
 * sources, configure every output stream (stream-copy parameter cloning or
 * encoder opening), open decoders and encoders, write output file headers,
 * and print the stream mapping. Returns 0 on success, negative AVERROR on
 * failure. NOTE(review): this listing is missing many original lines
 * (declarations of ost/ist, continue/break statements, else branches,
 * closing braces, goto dump_format/error paths); the gaps in the embedded
 * line numbering are not dead code.
 */
2799 static int transcode_init(void)
2801 int ret = 0, i, j, k;
2802 AVFormatContext *oc;
2805 char error[1024] = {0};
/* Attach complex-filtergraph outputs to an input stream index where possible. */
2808 for (i = 0; i < nb_filtergraphs; i++) {
2809 FilterGraph *fg = filtergraphs[i];
2810 for (j = 0; j < fg->nb_outputs; j++) {
2811 OutputFilter *ofilter = fg->outputs[j];
2812 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2814 if (fg->nb_inputs != 1)
2816 for (k = nb_input_streams-1; k >= 0 ; k--)
2817 if (fg->inputs[0]->ist == input_streams[k])
2819 ofilter->ost->source_index = k;
2823 /* init framerate emulation */
2824 for (i = 0; i < nb_input_files; i++) {
2825 InputFile *ifile = input_files[i];
2826 if (ifile->rate_emu)
2827 for (j = 0; j < ifile->nb_streams; j++)
2828 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2831 /* for each output stream, we compute the right encoding parameters */
2832 for (i = 0; i < nb_output_streams; i++) {
2833 AVCodecContext *enc_ctx;
2834 AVCodecContext *dec_ctx = NULL;
2835 ost = output_streams[i];
2836 oc = output_files[ost->file_index]->ctx;
2837 ist = get_input_stream(ost);
2839 if (ost->attachment_filename)
/* Stream copy configures st->codec directly; encoding uses enc_ctx. */
2842 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2845 dec_ctx = ist->dec_ctx;
2847 ost->st->disposition = ist->st->disposition;
2848 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2849 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type, mark it as default. */
2851 for (j=0; j<oc->nb_streams; j++) {
2852 AVStream *st = oc->streams[j];
2853 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2856 if (j == oc->nb_streams)
2857 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2858 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: clone codec parameters from the decoder context ---- */
2861 if (ost->stream_copy) {
2863 uint64_t extra_size;
2865 av_assert0(ist && !ost->filter);
/* 64-bit sum guards against overflow before the INT_MAX check below. */
2867 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2869 if (extra_size > INT_MAX) {
2870 return AVERROR(EINVAL);
2873 /* if stream_copy is selected, no need to decode or encode */
2874 enc_ctx->codec_id = dec_ctx->codec_id;
2875 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only if the output container can represent it. */
2877 if (!enc_ctx->codec_tag) {
2878 unsigned int codec_tag;
2879 if (!oc->oformat->codec_tag ||
2880 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2881 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2882 enc_ctx->codec_tag = dec_ctx->codec_tag;
2885 enc_ctx->bit_rate = dec_ctx->bit_rate;
2886 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2887 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2888 enc_ctx->field_order = dec_ctx->field_order;
2889 if (dec_ctx->extradata_size) {
2890 enc_ctx->extradata = av_mallocz(extra_size);
2891 if (!enc_ctx->extradata) {
2892 return AVERROR(ENOMEM);
2894 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2896 enc_ctx->extradata_size= dec_ctx->extradata_size;
2897 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2899 enc_ctx->time_base = ist->st->time_base;
/* Container-specific timebase heuristics (controlled by -copytb / copy_tb). */
2901 * Avi is a special case here because it supports variable fps but
2902 * having the fps and timebase differe significantly adds quite some
2905 if(!strcmp(oc->oformat->name, "avi")) {
2906 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2907 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2908 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2909 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2911 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2912 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2913 enc_ctx->ticks_per_frame = 2;
2914 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2915 && av_q2d(ist->st->time_base) < 1.0/500
2917 enc_ctx->time_base = dec_ctx->time_base;
2918 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2919 enc_ctx->time_base.den *= 2;
2920 enc_ctx->ticks_per_frame = 2;
/* Non-AVI, non-MOV-family fixed-fps containers. */
2922 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2923 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2924 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2925 && strcmp(oc->oformat->name, "f4v")
2927 if( copy_tb<0 && dec_ctx->time_base.den
2928 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2929 && av_q2d(ist->st->time_base) < 1.0/500
2931 enc_ctx->time_base = dec_ctx->time_base;
2932 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode ("tmcd") tracks with a plausible frame-duration timebase keep it. */
2935 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2936 && dec_ctx->time_base.num < dec_ctx->time_base.den
2937 && dec_ctx->time_base.num > 0
2938 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2939 enc_ctx->time_base = dec_ctx->time_base;
2942 if (!ost->frame_rate.num)
2943 ost->frame_rate = ist->framerate;
2944 if(ost->frame_rate.num)
2945 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2947 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2948 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream-level side data (skipping an overridden display matrix). */
2950 if (ist->st->nb_side_data) {
2951 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2952 sizeof(*ist->st->side_data));
2953 if (!ost->st->side_data)
2954 return AVERROR(ENOMEM);
2956 ost->st->nb_side_data = 0;
2957 for (j = 0; j < ist->st->nb_side_data; j++) {
2958 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2959 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2961 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2964 sd_dst->data = av_malloc(sd_src->size);
2966 return AVERROR(ENOMEM);
2967 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2968 sd_dst->size = sd_src->size;
2969 sd_dst->type = sd_src->type;
2970 ost->st->nb_side_data++;
2974 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter cloning for stream copy. */
2976 switch (enc_ctx->codec_type) {
2977 case AVMEDIA_TYPE_AUDIO:
2978 if (audio_volume != 256) {
2979 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2982 enc_ctx->channel_layout = dec_ctx->channel_layout;
2983 enc_ctx->sample_rate = dec_ctx->sample_rate;
2984 enc_ctx->channels = dec_ctx->channels;
2985 enc_ctx->frame_size = dec_ctx->frame_size;
2986 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2987 enc_ctx->block_align = dec_ctx->block_align;
2988 enc_ctx->initial_padding = dec_ctx->delay;
2989 enc_ctx->profile = dec_ctx->profile;
2990 #if FF_API_AUDIOENC_DELAY
2991 enc_ctx->delay = dec_ctx->delay;
/* Bogus/meaningless block_align values for MP3 and AC-3 are cleared. */
2993 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2994 enc_ctx->block_align= 0;
2995 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2996 enc_ctx->block_align= 0;
2998 case AVMEDIA_TYPE_VIDEO:
2999 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3000 enc_ctx->width = dec_ctx->width;
3001 enc_ctx->height = dec_ctx->height;
3002 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3003 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3005 av_mul_q(ost->frame_aspect_ratio,
3006 (AVRational){ enc_ctx->height, enc_ctx->width });
3007 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3008 "with stream copy may produce invalid files\n");
3010 else if (ist->st->sample_aspect_ratio.num)
3011 sar = ist->st->sample_aspect_ratio;
3013 sar = dec_ctx->sample_aspect_ratio;
3014 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3015 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3016 ost->st->r_frame_rate = ist->st->r_frame_rate;
3018 case AVMEDIA_TYPE_SUBTITLE:
3019 enc_ctx->width = dec_ctx->width;
3020 enc_ctx->height = dec_ctx->height;
3022 case AVMEDIA_TYPE_UNKNOWN:
3023 case AVMEDIA_TYPE_DATA:
3024 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path: find the encoder and set up filters ---- */
3031 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3033 /* should only happen when a default codec is not present. */
3034 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3035 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3036 ret = AVERROR(EINVAL);
3040 set_encoder_id(output_files[ost->file_index], ost);
3043 if (qsv_transcode_init(ost))
/* Audio/video encoders without an explicit filtergraph get a simple one. */
3048 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3049 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3051 fg = init_simple_filtergraph(ist, ost);
3052 if (configure_filtergraph(fg)) {
3053 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate: filter -> -r option -> input r_frame_rate -> 25 fps. */
3058 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3059 if (!ost->frame_rate.num)
3060 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3061 if (ist && !ost->frame_rate.num)
3062 ost->frame_rate = ist->framerate;
3063 if (ist && !ost->frame_rate.num)
3064 ost->frame_rate = ist->st->r_frame_rate;
3065 if (ist && !ost->frame_rate.num) {
3066 ost->frame_rate = (AVRational){25, 1};
3067 av_log(NULL, AV_LOG_WARNING,
3069 "about the input framerate is available. Falling "
3070 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3071 "if you want a different framerate.\n",
3072 ost->file_index, ost->index);
3074 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3075 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3076 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3077 ost->frame_rate = ost->enc->supported_framerates[idx];
3079 // reduce frame rate for mpeg4 to be within the spec limits
3080 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3081 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3082 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull final encoder parameters from the filtergraph's output link. */
3086 switch (enc_ctx->codec_type) {
3087 case AVMEDIA_TYPE_AUDIO:
3088 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3089 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3090 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3091 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3092 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3094 case AVMEDIA_TYPE_VIDEO:
3095 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3096 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3097 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3098 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3099 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3100 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3101 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Re-scale pre-parsed forced keyframe times into the final time base. */
3103 for (j = 0; j < ost->forced_kf_count; j++)
3104 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3106 enc_ctx->time_base);
3108 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3109 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3110 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3111 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3112 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3113 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn about non-yuv420p default pix_fmt for widely deployed codecs. */
3114 if (!strncmp(ost->enc->name, "libx264", 7) &&
3115 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3116 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3117 av_log(NULL, AV_LOG_WARNING,
3118 "No pixel format specified, %s for H.264 encoding chosen.\n"
3119 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3120 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3121 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3122 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3123 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3124 av_log(NULL, AV_LOG_WARNING,
3125 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3126 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3127 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3128 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3130 ost->st->avg_frame_rate = ost->frame_rate;
3133 enc_ctx->width != dec_ctx->width ||
3134 enc_ctx->height != dec_ctx->height ||
3135 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3136 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: expression, "source", or a static timestamp list. */
3139 if (ost->forced_keyframes) {
3140 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3141 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3142 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3144 av_log(NULL, AV_LOG_ERROR,
3145 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3148 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3149 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3150 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3151 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3153 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3154 // parse it only for static kf timings
3155 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3156 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3160 case AVMEDIA_TYPE_SUBTITLE:
3161 enc_ctx->time_base = (AVRational){1, 1000};
3162 if (!enc_ctx->width) {
3163 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3164 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3167 case AVMEDIA_TYPE_DATA:
/* Evaluate a user-supplied -disposition flag string via a local AVOption table. */
3175 if (ost->disposition) {
3176 static const AVOption opts[] = {
3177 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3178 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3179 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3180 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3181 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3182 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3183 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3184 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3185 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3186 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3187 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3188 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3189 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3190 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3193 static const AVClass class = {
3195 .item_name = av_default_item_name,
3197 .version = LIBAVUTIL_VERSION_INT,
3199 const AVClass *pclass = &class;
3201 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3207 /* init input streams */
3208 for (i = 0; i < nb_input_streams; i++)
3209 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts opened so far before bailing. */
3210 for (i = 0; i < nb_output_streams; i++) {
3211 ost = output_streams[i];
3212 avcodec_close(ost->enc_ctx);
3217 /* open each encoder */
3218 for (i = 0; i < nb_output_streams; i++) {
3219 ret = init_output_stream(output_streams[i], error, sizeof(error));
3224 /* discard unused programs */
3225 for (i = 0; i < nb_input_files; i++) {
3226 InputFile *ifile = input_files[i];
3227 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3228 AVProgram *p = ifile->ctx->programs[j];
3229 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is in use. */
3231 for (k = 0; k < p->nb_stream_indexes; k++)
3232 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3233 discard = AVDISCARD_DEFAULT;
3236 p->discard = discard;
3240 /* open files and write file headers */
3241 for (i = 0; i < nb_output_files; i++) {
3242 oc = output_files[i]->ctx;
3243 oc->interrupt_callback = int_cb;
3244 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3245 snprintf(error, sizeof(error),
3246 "Could not write header for output file #%d "
3247 "(incorrect codec parameters ?): %s",
3248 i, av_err2str(ret));
3249 ret = AVERROR(EINVAL);
3252 // assert_avoptions(output_files[i]->opts);
3253 if (strcmp(oc->oformat->name, "rtp")) {
3259 /* dump the file output parameters - cannot be done before in case
3261 for (i = 0; i < nb_output_files; i++) {
3262 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3265 /* dump the stream mapping */
3266 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3267 for (i = 0; i < nb_input_streams; i++) {
3268 ist = input_streams[i];
3270 for (j = 0; j < ist->nb_filters; j++) {
3271 if (ist->filters[j]->graph->graph_desc) {
3272 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3273 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3274 ist->filters[j]->name);
3275 if (nb_filtergraphs > 1)
3276 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3277 av_log(NULL, AV_LOG_INFO, "\n");
3282 for (i = 0; i < nb_output_streams; i++) {
3283 ost = output_streams[i];
3285 if (ost->attachment_filename) {
3286 /* an attached file */
3287 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3288 ost->attachment_filename, ost->file_index, ost->index);
3292 if (ost->filter && ost->filter->graph->graph_desc) {
3293 /* output from a complex graph */
3294 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3295 if (nb_filtergraphs > 1)
3296 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3298 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3299 ost->index, ost->enc ? ost->enc->name : "?");
3303 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3304 input_streams[ost->source_index]->file_index,
3305 input_streams[ost->source_index]->st->index,
3308 if (ost->sync_ist != input_streams[ost->source_index])
3309 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3310 ost->sync_ist->file_index,
3311 ost->sync_ist->st->index);
3312 if (ost->stream_copy)
3313 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Transcoding: show codec + decoder/encoder names, "native" for built-ins. */
3315 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3316 const AVCodec *out_codec = ost->enc;
3317 const char *decoder_name = "?";
3318 const char *in_codec_name = "?";
3319 const char *encoder_name = "?";
3320 const char *out_codec_name = "?";
3321 const AVCodecDescriptor *desc;
3324 decoder_name = in_codec->name;
3325 desc = avcodec_descriptor_get(in_codec->id);
3327 in_codec_name = desc->name;
3328 if (!strcmp(decoder_name, in_codec_name))
3329 decoder_name = "native";
3333 encoder_name = out_codec->name;
3334 desc = avcodec_descriptor_get(out_codec->id);
3336 out_codec_name = desc->name;
3337 if (!strcmp(encoder_name, out_codec_name))
3338 encoder_name = "native";
3341 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3342 in_codec_name, decoder_name,
3343 out_codec_name, encoder_name);
3345 av_log(NULL, AV_LOG_INFO, "\n");
/* Error exit path: report the accumulated message. */
3349 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3353 if (sdp_filename || want_sdp) {
3357 transcode_init_done = 1;
3362 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3363 static int need_output(void)
/* Scan every output stream: a stream stops needing output once it is marked
 * finished, its file has hit the -fs byte limit, or it has produced
 * -frames/max_frames frames. */
3367 for (i = 0; i < nb_output_streams; i++) {
3368 OutputStream *ost = output_streams[i];
3369 OutputFile *of = output_files[ost->file_index];
3370 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Already finished, or the muxed file reached the user-set size limit. */
3372 if (ost->finished ||
3373 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Frame cap reached: close every stream belonging to this output file. */
3375 if (ost->frame_number >= ost->max_frames) {
3377 for (j = 0; j < of->ctx->nb_streams; j++)
3378 close_output_stream(output_streams[of->ost_index + j]);
3389 * Select the output stream to process.
3391 * @return selected output stream, or NULL if none available
3393 static OutputStream *choose_output(void)
/* Pick the unfinished output stream with the smallest current DTS
 * (rescaled to a common time base); streams with no DTS yet sort first
 * via INT64_MIN so they are serviced immediately. */
3396 int64_t opts_min = INT64_MAX;
3397 OutputStream *ost_min = NULL;
3399 for (i = 0; i < nb_output_streams; i++) {
3400 OutputStream *ost = output_streams[i];
3401 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3402 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3404 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3405 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* A temporarily unavailable stream makes the whole selection return NULL
 * so the caller retries later instead of busy-looping on it. */
3407 if (!ost->finished && opts < opts_min) {
3409 ost_min = ost->unavailable ? NULL : ost;
/* Turn terminal echo on stdin (fd 0) on or off via termios; silently does
 * nothing when stdin is not a tty (tcgetattr fails). */
3415 static void set_tty_echo(int on)
3419 if (tcgetattr(0, &tty) == 0) {
3420 if (on) tty.c_lflag |= ECHO;
3421 else tty.c_lflag &= ~ECHO;
3422 tcsetattr(0, TCSANOW, &tty);
/* Poll for single-key interactive commands while transcoding.
 * Returns AVERROR_EXIT to request termination (signal received or quit key),
 * 0 otherwise. Keys are only sampled every 100ms and never in daemon mode. */
3427 static int check_keyboard_interaction(int64_t cur_time)
3430 static int64_t last_time;
3431 if (received_nb_signals)
3432 return AVERROR_EXIT;
3433 /* read_key() returns 0 on EOF */
/* Rate-limit key polling to once per 100ms of wall clock. */
3434 if(cur_time - last_time >= 100000 && !run_as_daemon){
3436 last_time = cur_time;
3440 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity, 's' toggles the QP histogram. */
3441 if (key == '+') av_log_set_level(av_log_get_level()+10);
3442 if (key == '-') av_log_set_level(av_log_get_level()-10);
3443 if (key == 's') qp_hist ^= 1;
3446 do_hex_dump = do_pkt_dump = 0;
3447 } else if(do_pkt_dump){
3451 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a full command line from the tty and dispatch it to the
 * filtergraphs ('c' targets the first matching filter, 'C' all of them). */
3453 if (key == 'c' || key == 'C'){
3454 char buf[4096], target[64], command[256], arg[256] = {0};
3457 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3460 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3465 fprintf(stderr, "\n");
/* Require at least target, time and command; arg is optional. */
3467 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3468 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3469 target, time, command, arg);
3470 for (i = 0; i < nb_filtergraphs; i++) {
3471 FilterGraph *fg = filtergraphs[i];
3474 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3475 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3476 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3477 } else if (key == 'c') {
/* NOTE(review): "Queing" is a typo for "Queuing" in these two user-facing
 * messages; left untouched here (string changes are out of scope). */
3478 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3479 ret = AVERROR_PATCHWELCOME;
3481 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3483 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3488 av_log(NULL, AV_LOG_ERROR,
3489 "Parse error, at least 3 arguments were expected, "
3490 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles the codec debug flags; 'D' prompts for an explicit value. */
3493 if (key == 'd' || key == 'D'){
3496 debug = input_streams[0]->st->codec->debug<<1;
3497 if(!debug) debug = 1;
3498 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3505 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3510 fprintf(stderr, "\n");
3511 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3512 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to every decoder and encoder context. */
3514 for(i=0;i<nb_input_streams;i++) {
3515 input_streams[i]->st->codec->debug = debug;
3517 for(i=0;i<nb_output_streams;i++) {
3518 OutputStream *ost = output_streams[i];
3519 ost->enc_ctx->debug = debug;
3521 if(debug) av_log_set_level(AV_LOG_DEBUG);
3522 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys, presumably) print this help text. */
3525 fprintf(stderr, "key function\n"
3526 "? show this help\n"
3527 "+ increase verbosity\n"
3528 "- decrease verbosity\n"
3529 "c Send command to first matching filter supporting it\n"
3530 "C Send/Que command to all matching filters\n"
3531 "D cycle through available debug modes\n"
3532 "h dump packets/hex press to cycle through the 3 states\n"
3534 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.
 * arg is the InputFile*. For non-blocking files a failed (EAGAIN) send is
 * retried in blocking mode with a warning suggesting -thread_queue_size. */
3541 static void *input_thread(void *arg)
3544 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3549 ret = av_read_frame(f->ctx, &pkt);
3551 if (ret == AVERROR(EAGAIN)) {
/* Demuxer error or EOF: propagate it to the receiving side and stop. */
3556 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3559 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3560 if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: fall back to a blocking send. */
3562 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3563 av_log(f->ctx, AV_LOG_WARNING,
3564 "Thread message queue blocking; consider raising the "
3565 "thread_queue_size option (current value: %d)\n",
3566 f->thread_queue_size);
/* Send failed: the packet was not consumed, so unref it ourselves. */
3569 if (ret != AVERROR_EOF)
3570 av_log(f->ctx, AV_LOG_ERROR,
3571 "Unable to send packet to main thread: %s\n",
3573 av_packet_unref(&pkt);
3574 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and join every input demuxer thread: signal EOF to the sender,
 * drain (and unref) any queued packets so the thread can finish, join it,
 * then free the message queue. Safe to call when no threads were started. */
3582 static void free_input_threads(void)
3586 for (i = 0; i < nb_input_files; i++) {
3587 InputFile *f = input_files[i];
3590 if (!f || !f->in_thread_queue)
3592 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3593 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3594 av_packet_unref(&pkt);
3596 pthread_join(f->thread, NULL);
3598 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (skipped entirely when there is
 * only a single input, where threading buys nothing).
 * Returns 0 on success or a negative AVERROR code. */
3602 static int init_input_threads(void)
3606 if (nb_input_files == 1)
3609 for (i = 0; i < nb_input_files; i++) {
3610 InputFile *f = input_files[i];
/* Use non-blocking queue sends for non-seekable (live-ish) inputs,
 * except the lavfi pseudo-demuxer. */
3612 if (f->ctx->pb ? !f->ctx->pb->seekable :
3613 strcmp(f->ctx->iformat->name, "lavfi"))
3614 f->non_blocking = 1;
3615 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3616 f->thread_queue_size, sizeof(AVPacket));
3620 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3621 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3622 av_thread_message_queue_free(&f->in_thread_queue);
3623 return AVERROR(ret);
/* Receive the next packet from the input file's demuxer-thread queue,
 * non-blocking when the file is marked non_blocking. */
3629 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3631 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3633 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. With -re (rate emulation, guarded
 * by elided code) packets are withheld with EAGAIN until wall-clock time has
 * caught up with the stream's DTS. Uses the threaded queue when multiple
 * inputs exist, otherwise reads from the demuxer directly. */
3637 static int get_input_packet(InputFile *f, AVPacket *pkt)
3641 for (i = 0; i < f->nb_streams; i++) {
3642 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (converted to microseconds) against elapsed time. */
3643 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3644 int64_t now = av_gettime_relative() - ist->start;
3646 return AVERROR(EAGAIN);
3651 if (nb_input_files > 1)
3652 return get_input_packet_mt(f, pkt);
3654 return av_read_frame(f->ctx, pkt);
/* Report whether any output stream is currently flagged unavailable,
 * i.e. the last transcode step ended in EAGAIN somewhere. */
3657 static int got_eagain(void)
3660 for (i = 0; i < nb_output_streams; i++)
3661 if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-stream unavailable flags so every
 * input/output is considered again on the next scheduling pass. */
3666 static void reset_eagain(void)
3669 for (i = 0; i < nb_input_files; i++)
3670 input_files[i]->eagain = 0;
3671 for (i = 0; i < nb_output_streams; i++)
3672 output_streams[i]->unavailable = 0;
3675 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Helper for -stream_loop: keeps *duration at the maximum of the two
 * candidate durations, comparing them correctly across differing time bases
 * with av_compare_ts(); returns the time base of the winning value. */
3676 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3677 AVRational time_base)
3683 return tmp_time_base;
3686 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3689 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek to start_time,
 * flush every active decoder, then compute the file's total duration (used
 * later to offset timestamps of the next iteration).
 * Returns a negative AVERROR on seek failure (elided path), 0/positive otherwise. */
3695 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3698 AVCodecContext *avctx;
3699 int i, ret, has_audio = 0;
3700 int64_t duration = 0;
3702 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: drain/flush decoders and detect whether audio is present. */
3706 for (i = 0; i < ifile->nb_streams; i++) {
3707 ist = input_streams[ifile->ist_index + i];
3708 avctx = ist->dec_ctx;
3711 if (ist->decoding_needed) {
3712 process_input_packet(ist, NULL, 1);
3713 avcodec_flush_buffers(avctx);
3716 /* duration is the length of the last frame in a stream
3717 * when audio stream is present we don't care about
3718 * last video frame length because it's not defined exactly */
3719 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate each stream's duration = span of seen timestamps
 * plus the length of the last frame (exact for audio via nb_samples,
 * approximated for video from the frame rate). */
3723 for (i = 0; i < ifile->nb_streams; i++) {
3724 ist = input_streams[ifile->ist_index + i];
3725 avctx = ist->dec_ctx;
3728 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3729 AVRational sample_rate = {1, avctx->sample_rate};
3731 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3735 if (ist->framerate.num) {
3736 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3737 } else if (ist->st->avg_frame_rate.num) {
3738 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3739 } else duration = 1;
3741 if (!ifile->duration)
3742 ifile->time_base = ist->st->time_base;
3743 /* the total duration of the stream, max_pts - min_pts is
3744 * the duration of the stream without the last frame */
3745 duration += ist->max_pts - ist->min_pts;
3746 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement (elided) once per completed iteration. */
3750 if (ifile->loop > 0)
3758 * - 0 -- one packet was read and processed
3759 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3760 * this function should be called again
3761 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (start-time correction, wrap correction, ts offset/scale, discontinuity
 * detection, -stream_loop duration offsetting) and hand it to
 * process_input_packet(). */
3763 static int process_input(int file_index)
3765 InputFile *ifile = input_files[file_index];
3766 AVFormatContext *is;
3774 ret = get_input_packet(ifile, &pkt);
3776 if (ret == AVERROR(EAGAIN)) {
/* On EOF with -stream_loop active: rewind and try reading again. */
3780 if (ret < 0 && ifile->loop) {
3781 if ((ret = seek_to_start(ifile, is)) < 0)
3783 ret = get_input_packet(ifile, &pkt);
3786 if (ret != AVERROR_EOF) {
3787 print_error(is->filename, ret);
/* Real EOF: flush decoders of this file and finish all output streams
 * fed directly (not through lavfi) from it. */
3792 for (i = 0; i < ifile->nb_streams; i++) {
3793 ist = input_streams[ifile->ist_index + i];
3794 if (ist->decoding_needed) {
3795 ret = process_input_packet(ist, NULL, 0);
3800 /* mark all outputs that don't go through lavfi as finished */
3801 for (j = 0; j < nb_output_streams; j++) {
3802 OutputStream *ost = output_streams[j];
3804 if (ost->source_index == ifile->ist_index + i &&
3805 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3806 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller moves on to other inputs. */
3810 ifile->eof_reached = 1;
3811 return AVERROR(EAGAIN);
3817 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3818 is->streams[pkt.stream_index]);
3820 /* the following test is needed in case new streams appear
3821 dynamically in stream : we ignore them */
3822 if (pkt.stream_index >= ifile->nb_streams) {
3823 report_new_stream(file_index, &pkt);
3824 goto discard_packet;
3827 ist = input_streams[ifile->ist_index + pkt.stream_index];
3829 ist->data_size += pkt.size;
3833 goto discard_packet;
3835 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3836 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose trace of raw demuxer timestamps before any adjustment. */
3841 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3842 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3843 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3844 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3845 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3846 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3847 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3848 av_ts2str(input_files[ist->file_index]->ts_offset),
3849 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time timestamp-wrap correction for streams with <64 pts_wrap_bits:
 * if a timestamp sits more than half a wrap period past start_time it is
 * assumed to have wrapped and is pulled back by one full period. */
3852 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3853 int64_t stime, stime2;
3854 // Correcting starttime based on the enabled streams
3855 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3856 // so we instead do it here as part of discontinuity handling
3857 if ( ist->next_dts == AV_NOPTS_VALUE
3858 && ifile->ts_offset == -is->start_time
3859 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3860 int64_t new_start_time = INT64_MAX;
3861 for (i=0; i<is->nb_streams; i++) {
3862 AVStream *st = is->streams[i];
3863 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3865 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3867 if (new_start_time > is->start_time) {
3868 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3869 ifile->ts_offset = -new_start_time;
3873 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3874 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3875 ist->wrap_correction_done = 1;
3877 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3878 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3879 ist->wrap_correction_done = 0;
3881 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3882 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3883 ist->wrap_correction_done = 0;
3887 /* add the stream-global side data to the first packet */
3888 if (ist->nb_packets == 1) {
3889 if (ist->st->nb_side_data)
3890 av_packet_split_side_data(&pkt);
3891 for (i = 0; i < ist->st->nb_side_data; i++) {
3892 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not duplicate side data the packet already carries, and let
 * autorotate consume the display matrix instead of forwarding it. */
3895 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3897 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3900 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3904 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the -itsscale factor. */
3908 if (pkt.dts != AV_NOPTS_VALUE)
3909 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3910 if (pkt.pts != AV_NOPTS_VALUE)
3911 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3913 if (pkt.pts != AV_NOPTS_VALUE)
3914 pkt.pts *= ist->ts_scale;
3915 if (pkt.dts != AV_NOPTS_VALUE)
3916 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first DTS of this stream vs. the
 * file's last seen timestamp (only for AVFMT_TS_DISCONT formats). */
3918 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3919 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3920 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3921 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3922 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3923 int64_t delta = pkt_dts - ifile->last_ts;
3924 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3925 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3926 ifile->ts_offset -= delta;
3927 av_log(NULL, AV_LOG_DEBUG,
3928 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3929 delta, ifile->ts_offset);
3930 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3931 if (pkt.pts != AV_NOPTS_VALUE)
3932 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track the observed pts range for the next loop's duration estimate. */
3936 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3937 if (pkt.pts != AV_NOPTS_VALUE) {
3938 pkt.pts += duration;
3939 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3940 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3943 if (pkt.dts != AV_NOPTS_VALUE)
3944 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next_dts:
 * large jumps either shift ts_offset (TS_DISCONT formats) or, past the
 * error threshold, invalidate the offending timestamps. */
3946 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3947 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3948 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3949 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3951 int64_t delta = pkt_dts - ist->next_dts;
3952 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3953 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3954 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3955 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3956 ifile->ts_offset -= delta;
3957 av_log(NULL, AV_LOG_DEBUG,
3958 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3959 delta, ifile->ts_offset);
3960 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3961 if (pkt.pts != AV_NOPTS_VALUE)
3962 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3965 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3966 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3967 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3968 pkt.dts = AV_NOPTS_VALUE;
3970 if (pkt.pts != AV_NOPTS_VALUE){
3971 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3972 delta = pkt_pts - ist->next_dts;
3973 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3974 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3975 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3976 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last DTS of the file for inter-stream discontinuity checks. */
3982 if (pkt.dts != AV_NOPTS_VALUE)
3983 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3986 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3987 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3988 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3989 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3990 av_ts2str(input_files[ist->file_index]->ts_offset),
3991 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video overlays progressing even without new subtitle packets. */
3994 sub2video_heartbeat(ist, pkt.pts);
3996 process_input_packet(ist, &pkt, 0);
3999 av_packet_unref(&pkt);
4005 * Perform a step of transcoding for the specified filter graph.
4007 * @param[in] graph filter graph to consider
4008 * @param[out] best_ist input stream where a frame would allow to continue
4009 * @return 0 for success, <0 for error
4011 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4014 int nb_requests, nb_requests_max = 0;
4015 InputFilter *ifilter;
/* Ask the graph for output first; success means a frame is ready to reap. */
4019 ret = avfilter_graph_request_oldest(graph->graph);
4021 return reap_filters(0);
/* Graph EOF: flush remaining frames and close all its output streams. */
4023 if (ret == AVERROR_EOF) {
4024 ret = reap_filters(1);
4025 for (i = 0; i < graph->nb_outputs; i++)
4026 close_output_stream(graph->outputs[i]->ost);
4029 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph starved. Choose the input stream whose buffersrc has
 * failed the most requests — feeding it is most likely to unblock the graph.
 * Inputs whose file is stalled (eagain) or at EOF are skipped. */
4032 for (i = 0; i < graph->nb_inputs; i++) {
4033 ifilter = graph->inputs[i];
4035 if (input_files[ist->file_index]->eagain ||
4036 input_files[ist->file_index]->eof_reached)
4038 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4039 if (nb_requests > nb_requests_max) {
4040 nb_requests_max = nb_requests;
/* No feedable input found (elided branch): mark the graph's outputs
 * unavailable so the scheduler tries other streams. */
4046 for (i = 0; i < graph->nb_outputs; i++)
4047 graph->outputs[i]->ost->unavailable = 1;
4053 * Run a single step of transcoding.
4055 * @return 0 for success, <0 for error
4057 static int transcode_step(void)
4063 ost = choose_output();
/* No selectable output stream left at all: transcoding is done. */
4070 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed stream: let the graph pick which input stream to feed. */
4075 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Directly-fed stream: its source input stream is fixed. */
4080 av_assert0(ost->source_index >= 0);
4081 ist = input_streams[ost->source_index];
4084 ret = process_input(ist->file_index);
4085 if (ret == AVERROR(EAGAIN)) {
/* Input stalled: mark this output unavailable so choose_output() moves on. */
4086 if (input_files[ist->file_index]->eagain)
4087 ost->unavailable = 1;
4092 return ret == AVERROR_EOF ? 0 : ret;
4094 return reap_filters(0);
4098 * The following code is the main loop of the file converter
/* Drive the whole conversion: init, main loop of transcode_step() calls,
 * then flush decoders, write trailers, close codecs and free per-stream
 * resources. Returns 0 on success or a negative AVERROR code. */
4100 static int transcode(void)
4103 AVFormatContext *os;
4106 int64_t timer_start;
4107 int64_t total_packets_written = 0;
4109 ret = transcode_init();
4113 if (stdin_interaction) {
4114 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4117 timer_start = av_gettime_relative();
4120 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM, a fatal error, or no output is needed. */
4124 while (!received_sigterm) {
4125 int64_t cur_time= av_gettime_relative();
4127 /* if 'q' pressed, exits */
4128 if (stdin_interaction)
4129 if (check_keyboard_interaction(cur_time) < 0)
4132 /* check if there's any stream where output is still needed */
4133 if (!need_output()) {
4134 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4138 ret = transcode_step();
4139 if (ret < 0 && ret != AVERROR_EOF) {
4141 av_strerror(ret, errbuf, sizeof(errbuf));
4143 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4147 /* dump report by using the output first video and audio streams */
4148 print_report(0, timer_start, cur_time);
4151 free_input_threads();
4154 /* at the end of stream, we must flush the decoder buffers */
4155 for (i = 0; i < nb_input_streams; i++) {
4156 ist = input_streams[i];
4157 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4158 process_input_packet(ist, NULL, 0);
4165 /* write the trailer if needed and close file */
4166 for (i = 0; i < nb_output_files; i++) {
4167 os = output_files[i]->ctx;
4168 if ((ret = av_write_trailer(os)) < 0) {
4169 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4175 /* dump report by using the first video and audio streams */
4176 print_report(1, timer_start, av_gettime_relative());
4178 /* close each encoder */
4179 for (i = 0; i < nb_output_streams; i++) {
4180 ost = output_streams[i];
4181 if (ost->encoding_needed) {
4182 av_freep(&ost->enc_ctx->stats_in);
4184 total_packets_written += ost->packets_written;
/* -abort_on empty_output: treat a run that wrote nothing as fatal. */
4187 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4188 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4192 /* close each decoder */
4193 for (i = 0; i < nb_input_streams; i++) {
4194 ist = input_streams[i];
4195 if (ist->decoding_needed) {
4196 avcodec_close(ist->dec_ctx);
4197 if (ist->hwaccel_uninit)
4198 ist->hwaccel_uninit(ist->dec_ctx);
/* Error-path cleanup (elided label above): threads and per-stream data. */
4207 free_input_threads();
4210 if (output_streams) {
4211 for (i = 0; i < nb_output_streams; i++) {
4212 ost = output_streams[i];
4215 if (fclose(ost->logfile))
4216 av_log(NULL, AV_LOG_ERROR,
4217 "Error closing logfile, loss of information possible: %s\n",
4218 av_err2str(AVERROR(errno)));
4219 ost->logfile = NULL;
4221 av_freep(&ost->forced_kf_pts);
4222 av_freep(&ost->apad);
4223 av_freep(&ost->disposition);
4224 av_dict_free(&ost->encoder_opts);
4225 av_dict_free(&ost->sws_dict);
4226 av_dict_free(&ost->swr_opts);
4227 av_dict_free(&ost->resample_opts);
/* Return consumed user CPU time in microseconds: getrusage() where
 * available, GetProcessTimes() on Windows (100ns units / 10), otherwise
 * fall back to the wall clock. Used for the -benchmark report. */
4235 static int64_t getutime(void)
4238 struct rusage rusage;
4240 getrusage(RUSAGE_SELF, &rusage);
4241 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4242 #elif HAVE_GETPROCESSTIMES
4244 FILETIME c, e, k, u;
4245 proc = GetCurrentProcess();
4246 GetProcessTimes(proc, &c, &e, &k, &u);
4247 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4249 return av_gettime_relative();
/* Return the process peak memory usage in bytes: ru_maxrss (kilobytes,
 * hence the *1024) where available, PeakPagefileUsage on Windows. */
4253 static int64_t getmaxrss(void)
4255 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4256 struct rusage rusage;
4257 getrusage(RUSAGE_SELF, &rusage);
4258 return (int64_t)rusage.ru_maxrss * 1024;
4259 #elif HAVE_GETPROCESSMEMORYINFO
4261 PROCESS_MEMORY_COUNTERS memcounters;
4262 proc = GetCurrentProcess();
4263 memcounters.cb = sizeof(memcounters);
4264 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4265 return memcounters.PeakPagefileUsage;
/* No-op av_log callback, installed by "-d" (daemon mode) to silence output. */
4271 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse options, run transcode(),
 * report benchmark/decode-error statistics and exit with the proper code. */
4275 int main(int argc, char **argv)
4280 register_exit(ffmpeg_cleanup);
4282 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4284 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4285 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: run as daemon with logging disabled. */
4287 if(argc>1 && !strcmp(argv[1], "-d")){
4289 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters, formats and network support. */
4294 avcodec_register_all();
4296 avdevice_register_all();
4298 avfilter_register_all();
4300 avformat_network_init();
4302 show_banner(argc, argv, options);
4306 /* parse options and open all input/output files */
4307 ret = ffmpeg_parse_options(argc, argv);
4311 if (nb_output_files <= 0 && nb_input_files == 0) {
4313 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4317 /* file converter / grab */
4318 if (nb_output_files <= 0) {
4319 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4323 // if (nb_input_files == 0) {
4324 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the conversion for the -benchmark report. */
4328 current_time = ti = getutime();
4329 if (transcode() < 0)
4331 ti = getutime() - ti;
4333 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4335 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4336 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail the run if too large a fraction of frames errored. */
4337 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 when interrupted by a signal, otherwise the accumulated code. */
4340 exit_program(received_nb_signals ? 255 : main_return_code);
4341 return main_return_code;