2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int64_t current_time;
133 AVIOContext *progress_avio = NULL;
135 static uint8_t *subtitle_out;
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with an alpha channel so they can be inserted into filter graphs.
163 This is a temporary solution until libavfilter gets real subtitle support.
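/* Sketch of the mechanism as implemented below: each AVSubtitle is rendered
   onto a blank RGB32 canvas (sub2video_get_blank_frame() + sub2video_copy_rect())
   and the resulting frame is pushed into the stream's buffer sources like an
   ordinary decoded video frame (sub2video_push_ref()), so that e.g. an overlay
   filter can blend it onto the video. */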
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
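/* Note: the subtitle rectangle is stored as PAL8, so each source byte is an
   index into the 32-bit RGBA palette in pict.data[1]; the loop above expands
   one palette entry per pixel into the RGB32 canvas. */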
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles are usually muxed ahead of the other streams;
274 if not, a larger time would have to be subtracted here */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
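/* In short: a heartbeat re-sends the current (possibly blank) sub2video frame,
   but only when a downstream filter has actually failed a frame request
   (nb_reqs > 0) and the subtitle stream is not already ahead of the video. */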
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall for as long as we need
352 to let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that).
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end up here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
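/* int_cb is the AVIO interrupt callback handed to libavformat (it is installed
   on the demuxer/muxer contexts elsewhere in ffmpeg): blocking I/O is aborted
   as soon as the callback returns non-zero, i.e. once received_nb_signals
   exceeds transcode_init_done (one signal aborts I/O before init is finished,
   two are needed afterwards). */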
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
604 static void abort_codec_experimental(AVCodec *c, int encoder)
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
698 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
699 if (pkt->dts != AV_NOPTS_VALUE &&
700 pkt->pts != AV_NOPTS_VALUE &&
701 pkt->dts > pkt->pts) {
702 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
704 ost->file_index, ost->st->index);
706 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
707 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
708 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
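/* The expression above is simply the median of { pkt->pts, pkt->dts,
   last_mux_dts + 1 }: the sum of the three values minus their minimum and
   maximum. Example: pts=100, dts=120, last_mux_dts+1=90 gives
   310 - 90 - 120 = 100, a dts that is both <= pts and monotonic. */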
711 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
712 pkt->dts != AV_NOPTS_VALUE &&
713 ost->last_mux_dts != AV_NOPTS_VALUE) {
714 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
715 if (pkt->dts < max) {
716 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
717 av_log(s, loglevel, "Non-monotonous DTS in output stream "
718 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
719 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
721 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
724 av_log(s, loglevel, "changing to %"PRId64". This may result "
725 "in incorrect timestamps in the output file.\n",
727 if(pkt->pts >= pkt->dts)
728 pkt->pts = FFMAX(pkt->pts, max);
733 ost->last_mux_dts = pkt->dts;
735 ost->data_size += pkt->size;
736 ost->packets_written++;
738 pkt->stream_index = ost->index;
741 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
742 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
743 av_get_media_type_string(ost->enc_ctx->codec_type),
744 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
745 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
750 ret = av_interleaved_write_frame(s, pkt);
752 print_error("av_interleaved_write_frame()", ret);
753 main_return_code = 1;
754 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
756 av_packet_unref(pkt);
759 static void close_output_stream(OutputStream *ost)
761 OutputFile *of = output_files[ost->file_index];
763 ost->finished |= ENCODER_FINISHED;
765 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
766 of->recording_time = FFMIN(of->recording_time, end);
770 static int check_recording_time(OutputStream *ost)
772 OutputFile *of = output_files[ost->file_index];
774 if (of->recording_time != INT64_MAX &&
775 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
776 AV_TIME_BASE_Q) >= 0) {
777 close_output_stream(ost);
783 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
786 AVCodecContext *enc = ost->enc_ctx;
790 av_init_packet(&pkt);
794 if (!check_recording_time(ost))
797 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
798 frame->pts = ost->sync_opts;
799 ost->sync_opts = frame->pts + frame->nb_samples;
800 ost->samples_encoded += frame->nb_samples;
801 ost->frames_encoded++;
803 av_assert0(pkt.size || !pkt.data);
804 update_benchmark(NULL);
806 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
807 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
808 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
809 enc->time_base.num, enc->time_base.den);
812 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
813 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
816 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
819 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
822 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
823 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
824 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
825 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
828 write_frame(s, &pkt, ost);
832 static void do_subtitle_out(AVFormatContext *s,
837 int subtitle_out_max_size = 1024 * 1024;
838 int subtitle_out_size, nb, i;
843 if (sub->pts == AV_NOPTS_VALUE) {
844 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
853 subtitle_out = av_malloc(subtitle_out_max_size);
855 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
860 /* Note: DVB subtitles need one packet to draw them and another
861 packet to clear them */
862 /* XXX: signal it in the codec context ? */
863 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
868 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
870 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
871 pts -= output_files[ost->file_index]->start_time;
872 for (i = 0; i < nb; i++) {
873 unsigned save_num_rects = sub->num_rects;
875 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
876 if (!check_recording_time(ost))
880 // start_display_time is required to be 0
881 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
882 sub->end_display_time -= sub->start_display_time;
883 sub->start_display_time = 0;
887 ost->frames_encoded++;
889 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
890 subtitle_out_max_size, sub);
892 sub->num_rects = save_num_rects;
893 if (subtitle_out_size < 0) {
894 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
898 av_init_packet(&pkt);
899 pkt.data = subtitle_out;
900 pkt.size = subtitle_out_size;
901 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
902 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
903 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
904 /* XXX: the pts correction is handled here. Maybe handling
905 it in the codec would be better */
907 pkt.pts += 90 * sub->start_display_time;
909 pkt.pts += 90 * sub->end_display_time;
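/* start/end_display_time are in milliseconds while DVB subtitle timestamps
   run on the 90 kHz MPEG clock, hence the factor 90 (90000 ticks per second
   / 1000 ms per second); this assumes the usual 1/90000 stream time base. */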
912 write_frame(s, &pkt, ost);
916 static void do_video_out(AVFormatContext *s,
918 AVFrame *next_picture,
921 int ret, format_video_sync;
923 AVCodecContext *enc = ost->enc_ctx;
924 AVCodecContext *mux_enc = ost->st->codec;
925 int nb_frames, nb0_frames, i;
926 double delta, delta0;
929 InputStream *ist = NULL;
930 AVFilterContext *filter = ost->filter->filter;
932 if (ost->source_index >= 0)
933 ist = input_streams[ost->source_index];
935 if (filter->inputs[0]->frame_rate.num > 0 &&
936 filter->inputs[0]->frame_rate.den > 0)
937 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
939 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
940 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
942 if (!ost->filters_script &&
946 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
947 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
952 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
953 ost->last_nb0_frames[1],
954 ost->last_nb0_frames[2]);
956 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
957 delta = delta0 + duration;
959 /* by default, we output a single frame */
960 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
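/* Rough illustration of the CFR case below: if the input pts has drifted more
   than ~1.1 frame durations ahead of the output clock (delta > 1.1), the frame
   is emitted lrintf(delta) times to fill the gap (duplication); if it lags by
   more than 1.1 frame durations (delta < -1.1) it is dropped; a small negative
   delta0 only means the frame lands slightly before its slot, and sync_ipts is
   clipped to the slot. */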
963 format_video_sync = video_sync_method;
964 if (format_video_sync == VSYNC_AUTO) {
965 if(!strcmp(s->oformat->name, "avi")) {
966 format_video_sync = VSYNC_VFR;
968 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
970 && format_video_sync == VSYNC_CFR
971 && input_files[ist->file_index]->ctx->nb_streams == 1
972 && input_files[ist->file_index]->input_ts_offset == 0) {
973 format_video_sync = VSYNC_VSCFR;
975 if (format_video_sync == VSYNC_CFR && copy_ts) {
976 format_video_sync = VSYNC_VSCFR;
979 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
983 format_video_sync != VSYNC_PASSTHROUGH &&
984 format_video_sync != VSYNC_DROP) {
986 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
988 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
989 sync_ipts = ost->sync_opts;
994 switch (format_video_sync) {
996 if (ost->frame_number == 0 && delta0 >= 0.5) {
997 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1000 ost->sync_opts = lrint(sync_ipts);
1003 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1004 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1006 } else if (delta < -1.1)
1008 else if (delta > 1.1) {
1009 nb_frames = lrintf(delta);
1011 nb0_frames = lrintf(delta0 - 0.6);
1017 else if (delta > 0.6)
1018 ost->sync_opts = lrint(sync_ipts);
1021 case VSYNC_PASSTHROUGH:
1022 ost->sync_opts = lrint(sync_ipts);
1029 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1030 nb0_frames = FFMIN(nb0_frames, nb_frames);
1032 memmove(ost->last_nb0_frames + 1,
1033 ost->last_nb0_frames,
1034 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1035 ost->last_nb0_frames[0] = nb0_frames;
1037 if (nb0_frames == 0 && ost->last_dropped) {
1039 av_log(NULL, AV_LOG_VERBOSE,
1040 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1041 ost->frame_number, ost->st->index, ost->last_frame->pts);
1043 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1044 if (nb_frames > dts_error_threshold * 30) {
1045 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1049 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1050 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1052 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1054 /* emit the frame, duplicating it if needed */
1055 for (i = 0; i < nb_frames; i++) {
1056 AVFrame *in_picture;
1057 av_init_packet(&pkt);
1061 if (i < nb0_frames && ost->last_frame) {
1062 in_picture = ost->last_frame;
1064 in_picture = next_picture;
1069 in_picture->pts = ost->sync_opts;
1072 if (!check_recording_time(ost))
1074 if (ost->frame_number >= ost->max_frames)
1078 #if FF_API_LAVF_FMT_RAWPICTURE
1079 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1080 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1081 /* raw pictures are written as an AVPicture structure to
1082 avoid any copies; we temporarily support the older method. */
1084 if (in_picture->interlaced_frame)
1085 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1087 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1088 pkt.data = (uint8_t *)in_picture;
1089 pkt.size = sizeof(AVPicture);
1090 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1091 pkt.flags |= AV_PKT_FLAG_KEY;
1093 write_frame(s, &pkt, ost);
1097 int got_packet, forced_keyframe = 0;
1100 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1101 ost->top_field_first >= 0)
1102 in_picture->top_field_first = !!ost->top_field_first;
1104 if (in_picture->interlaced_frame) {
1105 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1106 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1108 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1110 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1112 in_picture->quality = enc->global_quality;
1113 in_picture->pict_type = 0;
1115 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1116 in_picture->pts * av_q2d(enc->time_base) : NAN;
1117 if (ost->forced_kf_index < ost->forced_kf_count &&
1118 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1119 ost->forced_kf_index++;
1120 forced_keyframe = 1;
1121 } else if (ost->forced_keyframes_pexpr) {
1123 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1124 res = av_expr_eval(ost->forced_keyframes_pexpr,
1125 ost->forced_keyframes_expr_const_values, NULL);
1126 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1127 ost->forced_keyframes_expr_const_values[FKF_N],
1128 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1130 ost->forced_keyframes_expr_const_values[FKF_T],
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1134 forced_keyframe = 1;
1135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1136 ost->forced_keyframes_expr_const_values[FKF_N];
1137 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1138 ost->forced_keyframes_expr_const_values[FKF_T];
1139 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1142 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1143 } else if ( ost->forced_keyframes
1144 && !strncmp(ost->forced_keyframes, "source", 6)
1145 && in_picture->key_frame==1) {
1146 forced_keyframe = 1;
1149 if (forced_keyframe) {
1150 in_picture->pict_type = AV_PICTURE_TYPE_I;
1151 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1154 update_benchmark(NULL);
1156 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1157 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1158 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1159 enc->time_base.num, enc->time_base.den);
1162 ost->frames_encoded++;
1164 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1165 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1167 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1173 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1174 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1175 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1176 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1179 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1180 pkt.pts = ost->sync_opts;
1182 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1185 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1188 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1191 frame_size = pkt.size;
1192 write_frame(s, &pkt, ost);
1194 /* if two pass, output log */
1195 if (ost->logfile && enc->stats_out) {
1196 fprintf(ost->logfile, "%s", enc->stats_out);
1202 * For video, number of frames in == number of packets out.
1203 * But there may be reordering, so we can't throw away frames on encoder
1204 * flush; we need to limit them here, before they go into the encoder.
1206 ost->frame_number++;
1208 if (vstats_filename && frame_size)
1209 do_video_stats(ost, frame_size);
1212 if (!ost->last_frame)
1213 ost->last_frame = av_frame_alloc();
1214 av_frame_unref(ost->last_frame);
1215 if (next_picture && ost->last_frame)
1216 av_frame_ref(ost->last_frame, next_picture);
1218 av_frame_free(&ost->last_frame);
1221 static double psnr(double d)
1223 return -10.0 * log10(d);
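/* Callers pass error / (width * height * 255 * 255), i.e. the mean squared
   error normalised by the squared peak sample value, so -10 * log10(d) is the
   usual PSNR = 10 * log10(MAX^2 / MSE), in dB. */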
1226 static void do_video_stats(OutputStream *ost, int frame_size)
1228 AVCodecContext *enc;
1230 double ti1, bitrate, avg_bitrate;
1232 /* this is executed just the first time do_video_stats is called */
1234 vstats_file = fopen(vstats_filename, "w");
1242 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1243 frame_number = ost->st->nb_frames;
1244 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1245 ost->quality / (float)FF_QP2LAMBDA);
1247 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1248 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1250 fprintf(vstats_file,"f_size= %6d ", frame_size);
1251 /* compute pts value */
1252 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1256 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1257 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
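/* bitrate: bits in this frame divided by one encoder time_base tick (taken as
   the frame duration); avg_bitrate: total bytes muxed so far over the stream
   end time ti1; both reported in kbit/s. */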
1258 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1259 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1260 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1264 static void finish_output_stream(OutputStream *ost)
1266 OutputFile *of = output_files[ost->file_index];
1269 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1272 for (i = 0; i < of->ctx->nb_streams; i++)
1273 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1278 * Get and encode new output from any of the filtergraphs, without causing
1281 * @return 0 for success, <0 for severe errors
1283 static int reap_filters(int flush)
1285 AVFrame *filtered_frame = NULL;
1288 /* Reap all buffers present in the buffer sinks */
1289 for (i = 0; i < nb_output_streams; i++) {
1290 OutputStream *ost = output_streams[i];
1291 OutputFile *of = output_files[ost->file_index];
1292 AVFilterContext *filter;
1293 AVCodecContext *enc = ost->enc_ctx;
1298 filter = ost->filter->filter;
1300 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1301 return AVERROR(ENOMEM);
1303 filtered_frame = ost->filtered_frame;
1306 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1307 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1308 AV_BUFFERSINK_FLAG_NO_REQUEST);
1310 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1311 av_log(NULL, AV_LOG_WARNING,
1312 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1313 } else if (flush && ret == AVERROR_EOF) {
1314 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1315 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1319 if (ost->finished) {
1320 av_frame_unref(filtered_frame);
1323 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1324 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1325 AVRational tb = enc->time_base;
1326 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1328 tb.den <<= extra_bits;
1330 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1331 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1332 float_pts /= 1 << extra_bits;
1333 // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
1334 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
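/* float_pts keeps sub-tick precision: the encoder time base denominator is
   temporarily scaled up by 2^extra_bits before rescaling and divided back out
   afterwards, so the vsync code can see fractional frame positions that the
   integer filtered_frame->pts (in enc->time_base) cannot express. */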
1336 filtered_frame->pts =
1337 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1338 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1340 //if (ost->source_index >= 0)
1341 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1343 switch (filter->inputs[0]->type) {
1344 case AVMEDIA_TYPE_VIDEO:
1345 if (!ost->frame_aspect_ratio.num)
1346 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1349 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1350 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1352 enc->time_base.num, enc->time_base.den);
1355 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1357 case AVMEDIA_TYPE_AUDIO:
1358 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1359 enc->channels != av_frame_get_channels(filtered_frame)) {
1360 av_log(NULL, AV_LOG_ERROR,
1361 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1364 do_audio_out(of->ctx, ost, filtered_frame);
1367 // TODO support subtitle filters
1371 av_frame_unref(filtered_frame);
1378 static void print_final_stats(int64_t total_size)
1380 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381 uint64_t subtitle_size = 0;
1382 uint64_t data_size = 0;
1383 float percent = -1.0;
1387 for (i = 0; i < nb_output_streams; i++) {
1388 OutputStream *ost = output_streams[i];
1389 switch (ost->enc_ctx->codec_type) {
1390 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393 default: other_size += ost->data_size; break;
1395 extra_size += ost->enc_ctx->extradata_size;
1396 data_size += ost->data_size;
1397 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1398 != AV_CODEC_FLAG_PASS1)
1402 if (data_size && total_size>0 && total_size >= data_size)
1403 percent = 100.0 * (total_size - data_size) / data_size;
1405 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406 video_size / 1024.0,
1407 audio_size / 1024.0,
1408 subtitle_size / 1024.0,
1409 other_size / 1024.0,
1410 extra_size / 1024.0);
1412 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1414 av_log(NULL, AV_LOG_INFO, "unknown");
1415 av_log(NULL, AV_LOG_INFO, "\n");
1417 /* print verbose per-stream stats */
1418 for (i = 0; i < nb_input_files; i++) {
1419 InputFile *f = input_files[i];
1420 uint64_t total_packets = 0, total_size = 0;
1422 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423 i, f->ctx->filename);
1425 for (j = 0; j < f->nb_streams; j++) {
1426 InputStream *ist = input_streams[f->ist_index + j];
1427 enum AVMediaType type = ist->dec_ctx->codec_type;
1429 total_size += ist->data_size;
1430 total_packets += ist->nb_packets;
1432 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433 i, j, media_type_string(type));
1434 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435 ist->nb_packets, ist->data_size);
1437 if (ist->decoding_needed) {
1438 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439 ist->frames_decoded);
1440 if (type == AVMEDIA_TYPE_AUDIO)
1441 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442 av_log(NULL, AV_LOG_VERBOSE, "; ");
1445 av_log(NULL, AV_LOG_VERBOSE, "\n");
1448 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449 total_packets, total_size);
1452 for (i = 0; i < nb_output_files; i++) {
1453 OutputFile *of = output_files[i];
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457 i, of->ctx->filename);
1459 for (j = 0; j < of->ctx->nb_streams; j++) {
1460 OutputStream *ost = output_streams[of->ost_index + j];
1461 enum AVMediaType type = ost->enc_ctx->codec_type;
1463 total_size += ost->data_size;
1464 total_packets += ost->packets_written;
1466 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 if (ost->encoding_needed) {
1469 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470 ost->frames_encoded);
1471 if (type == AVMEDIA_TYPE_AUDIO)
1472 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477 ost->packets_written, ost->data_size);
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483 total_packets, total_size);
1485 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1488 av_log(NULL, AV_LOG_WARNING, "\n");
1490 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1495 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1498 AVBPrint buf_script;
1500 AVFormatContext *oc;
1502 AVCodecContext *enc;
1503 int frame_number, vid, i;
1506 int64_t pts = INT64_MIN + 1;
1507 static int64_t last_time = -1;
1508 static int qp_histogram[52];
1509 int hours, mins, secs, us;
1513 if (!print_stats && !is_last_report && !progress_avio)
1516 if (!is_last_report) {
1517 if (last_time == -1) {
1518 last_time = cur_time;
1521 if ((cur_time - last_time) < 500000)
1523 last_time = cur_time;
1526 t = (cur_time-timer_start) / 1000000.0;
1529 oc = output_files[0]->ctx;
1531 total_size = avio_size(oc->pb);
1532 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1533 total_size = avio_tell(oc->pb);
1537 av_bprint_init(&buf_script, 0, 1);
1538 for (i = 0; i < nb_output_streams; i++) {
1540 ost = output_streams[i];
1542 if (!ost->stream_copy)
1543 q = ost->quality / (float) FF_QP2LAMBDA;
1545 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1547 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1548 ost->file_index, ost->index, q);
1550 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553 frame_number = ost->frame_number;
1554 fps = t > 1 ? frame_number / t : 0;
1555 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1556 frame_number, fps < 9.95, fps, q);
1557 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1558 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1559 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560 ost->file_index, ost->index, q);
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1566 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1568 for (j = 0; j < 32; j++)
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1572 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1574 double error, error_sum = 0;
1575 double scale, scale_sum = 0;
1577 char type[3] = { 'Y','U','V' };
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1579 for (j = 0; j < 3; j++) {
1580 if (is_last_report) {
1581 error = enc->error[j];
1582 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1584 error = ost->error[j];
1585 scale = enc->width * enc->height * 255.0 * 255.0;
1591 p = psnr(error / scale);
1592 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1593 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1594 ost->file_index, ost->index, type[j] | 32, p);
1596 p = psnr(error_sum / scale_sum);
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1598 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1599 ost->file_index, ost->index, p);
1603 /* track the most advanced end pts across all output streams (the current output time) */
1604 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1605 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1606 ost->st->time_base, AV_TIME_BASE_Q));
1608 nb_frames_drop += ost->last_dropped;
1611 secs = FFABS(pts) / AV_TIME_BASE;
1612 us = FFABS(pts) % AV_TIME_BASE;
1618 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1619 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1621 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1623 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624 "size=%8.0fkB time=", total_size / 1024.0);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629 (100 * us) / AV_TIME_BASE);
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633 av_bprintf(&buf_script, "bitrate=N/A\n");
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1639 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643 hours, mins, secs, us);
1645 if (nb_frames_dup || nb_frames_drop)
1646 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647 nb_frames_dup, nb_frames_drop);
1648 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1653 av_bprintf(&buf_script, "speed=N/A\n");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1656 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1659 if (print_stats || is_last_report) {
1660 const char end = is_last_report ? '\n' : '\r';
1661 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662 fprintf(stderr, "%s %c", buf, end);
1664 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1669 if (progress_avio) {
1670 av_bprintf(&buf_script, "progress=%s\n",
1671 is_last_report ? "end" : "continue");
1672 avio_write(progress_avio, buf_script.str,
1673 FFMIN(buf_script.len, buf_script.size - 1));
1674 avio_flush(progress_avio);
1675 av_bprint_finalize(&buf_script, NULL);
1676 if (is_last_report) {
1677 if ((ret = avio_closep(&progress_avio)) < 0)
1678 av_log(NULL, AV_LOG_ERROR,
1679 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1684 print_final_stats(total_size);
1687 static void flush_encoders(void)
1691 for (i = 0; i < nb_output_streams; i++) {
1692 OutputStream *ost = output_streams[i];
1693 AVCodecContext *enc = ost->enc_ctx;
1694 AVFormatContext *os = output_files[ost->file_index]->ctx;
1695 int stop_encoding = 0;
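/* Flushing works by feeding the encoder NULL frames in a loop: each call may
   still return a delayed packet (got_packet), and the loop for this stream
   stops once the encoder has nothing left to emit. */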
1697 if (!ost->encoding_needed)
1700 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1702 #if FF_API_LAVF_FMT_RAWPICTURE
1703 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1708 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1711 switch (enc->codec_type) {
1712 case AVMEDIA_TYPE_AUDIO:
1713 encode = avcodec_encode_audio2;
1716 case AVMEDIA_TYPE_VIDEO:
1717 encode = avcodec_encode_video2;
1728 av_init_packet(&pkt);
1732 update_benchmark(NULL);
1733 ret = encode(enc, &pkt, NULL, &got_packet);
1734 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1736 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1741 if (ost->logfile && enc->stats_out) {
1742 fprintf(ost->logfile, "%s", enc->stats_out);
1748 if (ost->finished & MUXER_FINISHED) {
1749 av_packet_unref(&pkt);
1752 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1753 pkt_size = pkt.size;
1754 write_frame(os, &pkt, ost);
1755 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1756 do_video_stats(ost, pkt_size);
1767 * Check whether a packet from ist should be written into ost at this time
1769 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1771 OutputFile *of = output_files[ost->file_index];
1772 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1774 if (ost->source_index != ist_index)
1780 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1786 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1788 OutputFile *of = output_files[ost->file_index];
1789 InputFile *f = input_files [ist->file_index];
1790 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1791 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 av_init_packet(&opkt);
1797 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1798 !ost->copy_initial_nonkeyframes)
1801 if (!ost->frame_number && !ost->copy_prior_start) {
1802 int64_t comp_start = start_time;
1803 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1804 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1805 if (pkt->pts == AV_NOPTS_VALUE ?
1806 ist->pts < comp_start :
1807 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1811 if (of->recording_time != INT64_MAX &&
1812 ist->pts >= of->recording_time + start_time) {
1813 close_output_stream(ost);
1817 if (f->recording_time != INT64_MAX) {
1818 start_time = f->ctx->start_time;
1819 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1820 start_time += f->start_time;
1821 if (ist->pts >= f->recording_time + start_time) {
1822 close_output_stream(ost);
1827 /* force the input stream PTS */
1828 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1831 if (pkt->pts != AV_NOPTS_VALUE)
1832 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1834 opkt.pts = AV_NOPTS_VALUE;
1836 if (pkt->dts == AV_NOPTS_VALUE)
1837 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1839 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1840 opkt.dts -= ost_tb_start_time;
1842 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1843 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1845 duration = ist->dec_ctx->frame_size;
1846 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1847 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1848 ost->st->time_base) - ost_tb_start_time;
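/* For audio streamcopy the dts is rebuilt from the packet's sample count:
   av_rescale_delta() accumulates the duration in 1/sample_rate units and keeps
   the rounding remainder in filter_in_rescale_delta_last, so timestamps do not
   drift over many packets. */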
1851 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1852 opkt.flags = pkt->flags;
1853 // FIXME remove the following 2 lines; they should be replaced by the bitstream filters
1854 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1855 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1856 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1857 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1859 int ret = av_parser_change(ost->parser, ost->st->codec,
1860 &opkt.data, &opkt.size,
1861 pkt->data, pkt->size,
1862 pkt->flags & AV_PKT_FLAG_KEY);
1864 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1869 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1874 opkt.data = pkt->data;
1875 opkt.size = pkt->size;
1877 av_copy_packet_side_data(&opkt, pkt);
1879 #if FF_API_LAVF_FMT_RAWPICTURE
1880 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1881 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1882 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1883 /* store AVPicture in AVPacket, as expected by the output format */
1884 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1886 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1890 opkt.data = (uint8_t *)&pict;
1891 opkt.size = sizeof(AVPicture);
1892 opkt.flags |= AV_PKT_FLAG_KEY;
1896 write_frame(of->ctx, &opkt, ost);
1899 int guess_input_channel_layout(InputStream *ist)
1901 AVCodecContext *dec = ist->dec_ctx;
1903 if (!dec->channel_layout) {
1904 char layout_name[256];
1906 if (dec->channels > ist->guess_layout_max)
1908 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1909 if (!dec->channel_layout)
1911 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1912 dec->channels, dec->channel_layout);
1913 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1914 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1919 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1921 if (*got_output || ret<0)
1922 decode_error_stat[ret<0] ++;
1924 if (ret < 0 && exit_on_error)
1927 if (exit_on_error && *got_output && ist) {
1928 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1929 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1935 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1937 AVFrame *decoded_frame, *f;
1938 AVCodecContext *avctx = ist->dec_ctx;
1939 int i, ret, err = 0, resample_changed;
1940 AVRational decoded_frame_tb;
1942 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1943 return AVERROR(ENOMEM);
1944 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1945 return AVERROR(ENOMEM);
1946 decoded_frame = ist->decoded_frame;
1948 update_benchmark(NULL);
1949 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1950 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1952 if (ret >= 0 && avctx->sample_rate <= 0) {
1953 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1954 ret = AVERROR_INVALIDDATA;
1957 check_decode_result(ist, got_output, ret);
1959 if (!*got_output || ret < 0)
1962 ist->samples_decoded += decoded_frame->nb_samples;
1963 ist->frames_decoded++;
1966 /* increment next_dts to use for the case where the input stream does not
1967 have timestamps or there are multiple frames in the packet */
1968 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1970 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1974 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1975 ist->resample_channels != avctx->channels ||
1976 ist->resample_channel_layout != decoded_frame->channel_layout ||
1977 ist->resample_sample_rate != decoded_frame->sample_rate;
1978 if (resample_changed) {
1979 char layout1[64], layout2[64];
1981 if (!guess_input_channel_layout(ist)) {
1982 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1983 "layout for Input Stream #%d.%d\n", ist->file_index,
1987 decoded_frame->channel_layout = avctx->channel_layout;
1989 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1990 ist->resample_channel_layout);
1991 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1992 decoded_frame->channel_layout);
1994 av_log(NULL, AV_LOG_INFO,
1995 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1996 ist->file_index, ist->st->index,
1997 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1998 ist->resample_channels, layout1,
1999 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2000 avctx->channels, layout2);
2002 ist->resample_sample_fmt = decoded_frame->format;
2003 ist->resample_sample_rate = decoded_frame->sample_rate;
2004 ist->resample_channel_layout = decoded_frame->channel_layout;
2005 ist->resample_channels = avctx->channels;
2007 for (i = 0; i < nb_filtergraphs; i++)
2008 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2009 FilterGraph *fg = filtergraphs[i];
2010 if (configure_filtergraph(fg) < 0) {
2011 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2017 /* if the decoder provides a pts, use it instead of the last packet pts.
2018 the decoder could be delaying output by a packet or more. */
2019 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2020 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2021 decoded_frame_tb = avctx->time_base;
2022 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2023 decoded_frame->pts = decoded_frame->pkt_pts;
2024 decoded_frame_tb = ist->st->time_base;
2025 } else if (pkt->pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = pkt->pts;
2027 decoded_frame_tb = ist->st->time_base;
2029 decoded_frame->pts = ist->dts;
2030 decoded_frame_tb = AV_TIME_BASE_Q;
2032 pkt->pts = AV_NOPTS_VALUE;
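/* Timestamp priority used above: decoder pts (in the codec time base), then
   pkt_pts carried through the decoder, then the packet's own pts, then the
   stream's running dts; pkt->pts is cleared so that additional frames decoded
   from the same packet do not reuse it. */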
2033 if (decoded_frame->pts != AV_NOPTS_VALUE)
2034 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2035 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2036 (AVRational){1, avctx->sample_rate});
2037 ist->nb_samples = decoded_frame->nb_samples;
2038 for (i = 0; i < ist->nb_filters; i++) {
2039 if (i < ist->nb_filters - 1) {
2040 f = ist->filter_frame;
2041 err = av_frame_ref(f, decoded_frame);
2046 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2047 AV_BUFFERSRC_FLAG_PUSH);
2048 if (err == AVERROR_EOF)
2049 err = 0; /* ignore */
2053 decoded_frame->pts = AV_NOPTS_VALUE;
2055 av_frame_unref(ist->filter_frame);
2056 av_frame_unref(decoded_frame);
2057 return err < 0 ? err : ret;
2060 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2062 AVFrame *decoded_frame, *f;
2063 int i, ret = 0, err = 0, resample_changed;
2064 int64_t best_effort_timestamp;
2065 AVRational *frame_sample_aspect;
2067 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 decoded_frame = ist->decoded_frame;
2072 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2074 update_benchmark(NULL);
2075 ret = avcodec_decode_video2(ist->dec_ctx,
2076 decoded_frame, got_output, pkt);
2077 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2079 // The following line may be required in some cases where there is no parser
2080 // or the parser does not set has_b_frames correctly
2081 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2082 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2083 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2085 av_log(ist->dec_ctx, AV_LOG_WARNING,
2086 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2087 "If you want to help, upload a sample "
2088 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2089 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2090 ist->dec_ctx->has_b_frames,
2091 ist->st->codec->has_b_frames);
2094 check_decode_result(ist, got_output, ret);
2096 if (*got_output && ret >= 0) {
2097 if (ist->dec_ctx->width != decoded_frame->width ||
2098 ist->dec_ctx->height != decoded_frame->height ||
2099 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2100 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2101 decoded_frame->width,
2102 decoded_frame->height,
2103 decoded_frame->format,
2104 ist->dec_ctx->width,
2105 ist->dec_ctx->height,
2106 ist->dec_ctx->pix_fmt);
2110 if (!*got_output || ret < 0)
2113 if(ist->top_field_first>=0)
2114 decoded_frame->top_field_first = ist->top_field_first;
2116 ist->frames_decoded++;
2118 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2119 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2123 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2125 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2126 if(best_effort_timestamp != AV_NOPTS_VALUE)
2127 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2130 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2131 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2132 ist->st->index, av_ts2str(decoded_frame->pts),
2133 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2134 best_effort_timestamp,
2135 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2136 decoded_frame->key_frame, decoded_frame->pict_type,
2137 ist->st->time_base.num, ist->st->time_base.den);
2142 if (ist->st->sample_aspect_ratio.num)
2143 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2145 resample_changed = ist->resample_width != decoded_frame->width ||
2146 ist->resample_height != decoded_frame->height ||
2147 ist->resample_pix_fmt != decoded_frame->format;
2148 if (resample_changed) {
2149 av_log(NULL, AV_LOG_INFO,
2150 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2151 ist->file_index, ist->st->index,
2152 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2153 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2155 ist->resample_width = decoded_frame->width;
2156 ist->resample_height = decoded_frame->height;
2157 ist->resample_pix_fmt = decoded_frame->format;
2159 for (i = 0; i < nb_filtergraphs; i++) {
2160 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2161 configure_filtergraph(filtergraphs[i]) < 0) {
2162 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2168 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2169 for (i = 0; i < ist->nb_filters; i++) {
2170 if (!frame_sample_aspect->num)
2171 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2173 if (i < ist->nb_filters - 1) {
2174 f = ist->filter_frame;
2175 err = av_frame_ref(f, decoded_frame);
2180 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2181 if (ret == AVERROR_EOF) {
2182 ret = 0; /* ignore */
2183 } else if (ret < 0) {
2184 av_log(NULL, AV_LOG_FATAL,
2185 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2191 av_frame_unref(ist->filter_frame);
2192 av_frame_unref(decoded_frame);
2193 return err < 0 ? err : ret;
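/* Decode a subtitle packet, optionally fix its duration, update the
 * sub2video state and send it to every subtitle encoder fed by this stream. */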
2196 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2198 AVSubtitle subtitle;
2199 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2200 &subtitle, got_output, pkt);
2202 check_decode_result(NULL, got_output, ret);
2204 if (ret < 0 || !*got_output) {
2206 sub2video_flush(ist);
2210 if (ist->fix_sub_duration) {
2212 if (ist->prev_sub.got_output) {
2213 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2214 1000, AV_TIME_BASE);
2215 if (end < ist->prev_sub.subtitle.end_display_time) {
2216 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2217 "Subtitle duration reduced from %d to %d%s\n",
2218 ist->prev_sub.subtitle.end_display_time, end,
2219 end <= 0 ? ", dropping it" : "");
2220 ist->prev_sub.subtitle.end_display_time = end;
2223 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2224 FFSWAP(int, ret, ist->prev_sub.ret);
2225 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2233 sub2video_update(ist, &subtitle);
2235 if (!subtitle.num_rects)
2238 ist->frames_decoded++;
2240 for (i = 0; i < nb_output_streams; i++) {
2241 OutputStream *ost = output_streams[i];
2243 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2244 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2247 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2251 avsubtitle_free(&subtitle);
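/* Signal EOF to all filtergraph inputs attached to this stream by sending
 * a NULL frame to each buffer source. */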
2255 static int send_filter_eof(InputStream *ist)
2258 for (i = 0; i < ist->nb_filters; i++) {
2259 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2266 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2267 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2273 if (!ist->saw_first_ts) {
2274 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2276 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2277 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2278 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2280 ist->saw_first_ts = 1;
2283 if (ist->next_dts == AV_NOPTS_VALUE)
2284 ist->next_dts = ist->dts;
2285 if (ist->next_pts == AV_NOPTS_VALUE)
2286 ist->next_pts = ist->pts;
2290 av_init_packet(&avpkt);
2298 if (pkt->dts != AV_NOPTS_VALUE) {
2299 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2300 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2301 ist->next_pts = ist->pts = ist->dts;
2304 // while we have more to decode or while the decoder did output something on EOF
2305 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2309 ist->pts = ist->next_pts;
2310 ist->dts = ist->next_dts;
2312 if (avpkt.size && avpkt.size != pkt->size &&
2313 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2314 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2315 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2316 ist->showed_multi_packet_warning = 1;
2319 switch (ist->dec_ctx->codec_type) {
2320 case AVMEDIA_TYPE_AUDIO:
2321 ret = decode_audio (ist, &avpkt, &got_output);
2323 case AVMEDIA_TYPE_VIDEO:
2324 ret = decode_video (ist, &avpkt, &got_output);
2325 if (avpkt.duration) {
2326 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2327 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2328 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2329 duration = ((int64_t)AV_TIME_BASE *
2330 ist->dec_ctx->framerate.den * ticks) /
2331 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2335 if(ist->dts != AV_NOPTS_VALUE && duration) {
2336 ist->next_dts += duration;
2338 ist->next_dts = AV_NOPTS_VALUE;
2341 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2343 case AVMEDIA_TYPE_SUBTITLE:
2344 ret = transcode_subtitles(ist, &avpkt, &got_output);
2351 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2352 ist->file_index, ist->st->index, av_err2str(ret));
2359 avpkt.pts= AV_NOPTS_VALUE;
2361 // touch data and size only if not EOF
2363 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2371 if (got_output && !pkt)
2375 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2376 /* except when looping, we need to flush but not to send an EOF */
2377 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2378 int ret = send_filter_eof(ist);
2380 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2385 /* handle stream copy */
2386 if (!ist->decoding_needed) {
2387 ist->dts = ist->next_dts;
2388 switch (ist->dec_ctx->codec_type) {
2389 case AVMEDIA_TYPE_AUDIO:
2390 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2391 ist->dec_ctx->sample_rate;
2393 case AVMEDIA_TYPE_VIDEO:
2394 if (ist->framerate.num) {
2395 // TODO: Remove work-around for c99-to-c89 issue 7
2396 AVRational time_base_q = AV_TIME_BASE_Q;
2397 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2398 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2399 } else if (pkt->duration) {
2400 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2401 } else if(ist->dec_ctx->framerate.num != 0) {
2402 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2403 ist->next_dts += ((int64_t)AV_TIME_BASE *
2404 ist->dec_ctx->framerate.den * ticks) /
2405 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2409 ist->pts = ist->dts;
2410 ist->next_pts = ist->next_dts;
2412 for (i = 0; pkt && i < nb_output_streams; i++) {
2413 OutputStream *ost = output_streams[i];
2415 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2418 do_streamcopy(ist, ost, pkt);
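/* Create an SDP description covering all RTP output files and print it to
 * stdout or write it to the requested SDP file. */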
2424 static void print_sdp(void)
2429 AVIOContext *sdp_pb;
2430 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2434 for (i = 0, j = 0; i < nb_output_files; i++) {
2435 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2436 avc[j] = output_files[i]->ctx;
2444 av_sdp_create(avc, j, sdp, sizeof(sdp));
2446 if (!sdp_filename) {
2447 printf("SDP:\n%s\n", sdp);
2450 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2451 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2453 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2454 avio_closep(&sdp_pb);
2455 av_freep(&sdp_filename);
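/* Return the hwaccel entry matching the given hardware pixel format, if any. */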
2463 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2466 for (i = 0; hwaccels[i].name; i++)
2467 if (hwaccels[i].pix_fmt == pix_fmt)
2468 return &hwaccels[i];
2472 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2474 InputStream *ist = s->opaque;
2475 const enum AVPixelFormat *p;
2478 for (p = pix_fmts; *p != -1; p++) {
2479 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2480 const HWAccel *hwaccel;
2482 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2485 hwaccel = get_hwaccel(*p);
2487 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2488 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2491 ret = hwaccel->init(s);
2493 if (ist->hwaccel_id == hwaccel->id) {
2494 av_log(NULL, AV_LOG_FATAL,
2495 "%s hwaccel requested for input stream #%d:%d, "
2496 "but cannot be initialized.\n", hwaccel->name,
2497 ist->file_index, ist->st->index);
2498 return AV_PIX_FMT_NONE;
2502 ist->active_hwaccel_id = hwaccel->id;
2503 ist->hwaccel_pix_fmt = *p;
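/* get_buffer2() callback: let the active hwaccel allocate the frame when it
 * uses the hwaccel pixel format, otherwise fall back to the default allocator. */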
2510 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2512 InputStream *ist = s->opaque;
2514 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2515 return ist->hwaccel_get_buffer(s, frame, flags);
2517 return avcodec_default_get_buffer2(s, frame, flags);
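/* Open the decoder for one input stream and set up its callbacks and options. */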
2520 static int init_input_stream(int ist_index, char *error, int error_len)
2523 InputStream *ist = input_streams[ist_index];
2525 if (ist->decoding_needed) {
2526 AVCodec *codec = ist->dec;
2528 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2529 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2530 return AVERROR(EINVAL);
2533 ist->dec_ctx->opaque = ist;
2534 ist->dec_ctx->get_format = get_format;
2535 ist->dec_ctx->get_buffer2 = get_buffer;
2536 ist->dec_ctx->thread_safe_callbacks = 1;
2538 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2539 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2540 (ist->decoding_needed & DECODING_FOR_OST)) {
2541 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2542 if (ist->decoding_needed & DECODING_FOR_FILTER)
2543 av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2546 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2547 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2548 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2549 if (ret == AVERROR_EXPERIMENTAL)
2550 abort_codec_experimental(codec, 0);
2552 snprintf(error, error_len,
2553 "Error while opening decoder for input stream "
2555 ist->file_index, ist->st->index, av_err2str(ret));
2558 assert_avoptions(ist->decoder_opts);
2561 ist->next_pts = AV_NOPTS_VALUE;
2562 ist->next_dts = AV_NOPTS_VALUE;
2567 static InputStream *get_input_stream(OutputStream *ost)
2569 if (ost->source_index >= 0)
2570 return input_streams[ost->source_index];
2574 static int compare_int64(const void *a, const void *b)
2576 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2579 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2583 if (ost->encoding_needed) {
2584 AVCodec *codec = ost->enc;
2585 AVCodecContext *dec = NULL;
2588 if ((ist = get_input_stream(ost)))
2590 if (dec && dec->subtitle_header) {
2591 /* ASS code assumes this buffer is null terminated so add extra byte. */
2592 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2593 if (!ost->enc_ctx->subtitle_header)
2594 return AVERROR(ENOMEM);
2595 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2596 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2598 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2599 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2600 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2602 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2603 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2604 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2606 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2607 if (ret == AVERROR_EXPERIMENTAL)
2608 abort_codec_experimental(codec, 1);
2609 snprintf(error, error_len,
2610 "Error while opening encoder for output stream #%d:%d - "
2611 "maybe incorrect parameters such as bit_rate, rate, width or height",
2612 ost->file_index, ost->index);
2615 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2616 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2617 av_buffersink_set_frame_size(ost->filter->filter,
2618 ost->enc_ctx->frame_size);
2619 assert_avoptions(ost->encoder_opts);
2620 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2621 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2622 " It takes bits/s as argument, not kbits/s\n");
2624 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2626 av_log(NULL, AV_LOG_FATAL,
2627 "Error initializing the output stream codec context.\n");
2631 if (ost->enc_ctx->nb_coded_side_data) {
2634 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2635 sizeof(*ost->st->side_data));
2636 if (!ost->st->side_data)
2637 return AVERROR(ENOMEM);
2639 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2640 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2641 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2643 sd_dst->data = av_malloc(sd_src->size);
2645 return AVERROR(ENOMEM);
2646 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2647 sd_dst->size = sd_src->size;
2648 sd_dst->type = sd_src->type;
2649 ost->st->nb_side_data++;
2653 // copy timebase while removing common factors
2654 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2655 ost->st->codec->codec= ost->enc_ctx->codec;
2657 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2659 av_log(NULL, AV_LOG_FATAL,
2660 "Error setting up codec context options.\n");
2663 // copy timebase while removing common factors
2664 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
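/* Parse the -force_key_frames argument (a list of times and/or "chapters"
 * with an optional offset) into a sorted array of pts in the encoder time base. */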
2670 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2671 AVCodecContext *avctx)
2674 int n = 1, i, size, index = 0;
2677 for (p = kf; *p; p++)
2681 pts = av_malloc_array(size, sizeof(*pts));
2683 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2688 for (i = 0; i < n; i++) {
2689 char *next = strchr(p, ',');
2694 if (!memcmp(p, "chapters", 8)) {
2696 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2699 if (avf->nb_chapters > INT_MAX - size ||
2700 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2702 av_log(NULL, AV_LOG_FATAL,
2703 "Could not allocate forced key frames array.\n");
2706 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2707 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2709 for (j = 0; j < avf->nb_chapters; j++) {
2710 AVChapter *c = avf->chapters[j];
2711 av_assert1(index < size);
2712 pts[index++] = av_rescale_q(c->start, c->time_base,
2713 avctx->time_base) + t;
2718 t = parse_time_or_die("force_key_frames", p, 1);
2719 av_assert1(index < size);
2720 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2727 av_assert0(index == size);
2728 qsort(pts, size, sizeof(*pts), compare_int64);
2729 ost->forced_kf_count = size;
2730 ost->forced_kf_pts = pts;
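/* Warn once about a stream that appeared in an input file after processing started. */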
2733 static void report_new_stream(int input_index, AVPacket *pkt)
2735 InputFile *file = input_files[input_index];
2736 AVStream *st = file->ctx->streams[pkt->stream_index];
2738 if (pkt->stream_index < file->nb_streams_warn)
2740 av_log(file->ctx, AV_LOG_WARNING,
2741 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2742 av_get_media_type_string(st->codec->codec_type),
2743 input_index, pkt->stream_index,
2744 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2745 file->nb_streams_warn = pkt->stream_index + 1;
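/* Set the "encoder" metadata tag on the output stream unless it is already set;
 * for bitexact output the libavcodec version is omitted from the tag. */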
2748 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2750 AVDictionaryEntry *e;
2752 uint8_t *encoder_string;
2753 int encoder_string_len;
2754 int format_flags = 0;
2755 int codec_flags = 0;
2757 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2760 e = av_dict_get(of->opts, "fflags", NULL, 0);
2762 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2765 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2767 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2769 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2772 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2775 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2776 encoder_string = av_mallocz(encoder_string_len);
2777 if (!encoder_string)
2780 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2781 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2783 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2784 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2785 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2786 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
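/* Set up the whole transcoding pipeline: derive encoding parameters for every
 * output stream, open decoders and encoders, write the output headers and
 * dump the stream mapping. */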
2789 static int transcode_init(void)
2791 int ret = 0, i, j, k;
2792 AVFormatContext *oc;
2795 char error[1024] = {0};
2798 for (i = 0; i < nb_filtergraphs; i++) {
2799 FilterGraph *fg = filtergraphs[i];
2800 for (j = 0; j < fg->nb_outputs; j++) {
2801 OutputFilter *ofilter = fg->outputs[j];
2802 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2804 if (fg->nb_inputs != 1)
2806 for (k = nb_input_streams-1; k >= 0 ; k--)
2807 if (fg->inputs[0]->ist == input_streams[k])
2809 ofilter->ost->source_index = k;
2813 /* init framerate emulation */
2814 for (i = 0; i < nb_input_files; i++) {
2815 InputFile *ifile = input_files[i];
2816 if (ifile->rate_emu)
2817 for (j = 0; j < ifile->nb_streams; j++)
2818 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2821 /* for each output stream, we compute the right encoding parameters */
2822 for (i = 0; i < nb_output_streams; i++) {
2823 AVCodecContext *enc_ctx;
2824 AVCodecContext *dec_ctx = NULL;
2825 ost = output_streams[i];
2826 oc = output_files[ost->file_index]->ctx;
2827 ist = get_input_stream(ost);
2829 if (ost->attachment_filename)
2832 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2835 dec_ctx = ist->dec_ctx;
2837 ost->st->disposition = ist->st->disposition;
2838 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2839 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2841 for (j=0; j<oc->nb_streams; j++) {
2842 AVStream *st = oc->streams[j];
2843 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2846 if (j == oc->nb_streams)
2847 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2848 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2851 if (ost->stream_copy) {
2853 uint64_t extra_size;
2855 av_assert0(ist && !ost->filter);
2857 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2859 if (extra_size > INT_MAX) {
2860 return AVERROR(EINVAL);
2863 /* if stream_copy is selected, no need to decode or encode */
2864 enc_ctx->codec_id = dec_ctx->codec_id;
2865 enc_ctx->codec_type = dec_ctx->codec_type;
2867 if (!enc_ctx->codec_tag) {
2868 unsigned int codec_tag;
2869 if (!oc->oformat->codec_tag ||
2870 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2871 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2872 enc_ctx->codec_tag = dec_ctx->codec_tag;
2875 enc_ctx->bit_rate = dec_ctx->bit_rate;
2876 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2877 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2878 enc_ctx->field_order = dec_ctx->field_order;
2879 if (dec_ctx->extradata_size) {
2880 enc_ctx->extradata = av_mallocz(extra_size);
2881 if (!enc_ctx->extradata) {
2882 return AVERROR(ENOMEM);
2884 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2886 enc_ctx->extradata_size= dec_ctx->extradata_size;
2887 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2889 enc_ctx->time_base = ist->st->time_base;
2891 * AVI is a special case here because it supports variable fps, but
2892 * having the fps and timebase differ significantly adds quite some
2895 if(!strcmp(oc->oformat->name, "avi")) {
2896 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2897 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2898 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2899 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2901 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2902 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2903 enc_ctx->ticks_per_frame = 2;
2904 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2905 && av_q2d(ist->st->time_base) < 1.0/500
2907 enc_ctx->time_base = dec_ctx->time_base;
2908 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2909 enc_ctx->time_base.den *= 2;
2910 enc_ctx->ticks_per_frame = 2;
2912 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2913 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2914 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2915 && strcmp(oc->oformat->name, "f4v")
2917 if( copy_tb<0 && dec_ctx->time_base.den
2918 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2919 && av_q2d(ist->st->time_base) < 1.0/500
2921 enc_ctx->time_base = dec_ctx->time_base;
2922 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2925 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2926 && dec_ctx->time_base.num < dec_ctx->time_base.den
2927 && dec_ctx->time_base.num > 0
2928 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2929 enc_ctx->time_base = dec_ctx->time_base;
2932 if (!ost->frame_rate.num)
2933 ost->frame_rate = ist->framerate;
2934 if(ost->frame_rate.num)
2935 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2937 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2938 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2940 if (ist->st->nb_side_data) {
2941 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2942 sizeof(*ist->st->side_data));
2943 if (!ost->st->side_data)
2944 return AVERROR(ENOMEM);
2946 ost->st->nb_side_data = 0;
2947 for (j = 0; j < ist->st->nb_side_data; j++) {
2948 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2949 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2951 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2954 sd_dst->data = av_malloc(sd_src->size);
2956 return AVERROR(ENOMEM);
2957 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2958 sd_dst->size = sd_src->size;
2959 sd_dst->type = sd_src->type;
2960 ost->st->nb_side_data++;
2964 ost->parser = av_parser_init(enc_ctx->codec_id);
2966 switch (enc_ctx->codec_type) {
2967 case AVMEDIA_TYPE_AUDIO:
2968 if (audio_volume != 256) {
2969 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2972 enc_ctx->channel_layout = dec_ctx->channel_layout;
2973 enc_ctx->sample_rate = dec_ctx->sample_rate;
2974 enc_ctx->channels = dec_ctx->channels;
2975 enc_ctx->frame_size = dec_ctx->frame_size;
2976 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2977 enc_ctx->block_align = dec_ctx->block_align;
2978 enc_ctx->initial_padding = dec_ctx->delay;
2979 enc_ctx->profile = dec_ctx->profile;
2980 #if FF_API_AUDIOENC_DELAY
2981 enc_ctx->delay = dec_ctx->delay;
2983 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2984 enc_ctx->block_align= 0;
2985 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2986 enc_ctx->block_align= 0;
2988 case AVMEDIA_TYPE_VIDEO:
2989 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2990 enc_ctx->width = dec_ctx->width;
2991 enc_ctx->height = dec_ctx->height;
2992 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2993 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2995 av_mul_q(ost->frame_aspect_ratio,
2996 (AVRational){ enc_ctx->height, enc_ctx->width });
2997 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2998 "with stream copy may produce invalid files\n");
3000 else if (ist->st->sample_aspect_ratio.num)
3001 sar = ist->st->sample_aspect_ratio;
3003 sar = dec_ctx->sample_aspect_ratio;
3004 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3005 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3006 ost->st->r_frame_rate = ist->st->r_frame_rate;
3008 case AVMEDIA_TYPE_SUBTITLE:
3009 enc_ctx->width = dec_ctx->width;
3010 enc_ctx->height = dec_ctx->height;
3012 case AVMEDIA_TYPE_UNKNOWN:
3013 case AVMEDIA_TYPE_DATA:
3014 case AVMEDIA_TYPE_ATTACHMENT:
3021 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3023 /* should only happen when a default codec is not present. */
3024 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3025 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3026 ret = AVERROR(EINVAL);
3030 set_encoder_id(output_files[ost->file_index], ost);
3033 if (qsv_transcode_init(ost))
3038 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3039 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3041 fg = init_simple_filtergraph(ist, ost);
3042 if (configure_filtergraph(fg)) {
3043 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3048 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3049 if (!ost->frame_rate.num)
3050 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3051 if (ist && !ost->frame_rate.num)
3052 ost->frame_rate = ist->framerate;
3053 if (ist && !ost->frame_rate.num)
3054 ost->frame_rate = ist->st->r_frame_rate;
3055 if (ist && !ost->frame_rate.num) {
3056 ost->frame_rate = (AVRational){25, 1};
3057 av_log(NULL, AV_LOG_WARNING,
3059 "about the input framerate is available. Falling "
3060 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3061 "if you want a different framerate.\n",
3062 ost->file_index, ost->index);
3064 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3065 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3066 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3067 ost->frame_rate = ost->enc->supported_framerates[idx];
3069 // reduce frame rate for mpeg4 to be within the spec limits
3070 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3071 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3072 ost->frame_rate.num, ost->frame_rate.den, 65535);
3076 switch (enc_ctx->codec_type) {
3077 case AVMEDIA_TYPE_AUDIO:
3078 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3079 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3080 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3081 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3082 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3084 case AVMEDIA_TYPE_VIDEO:
3085 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3086 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3087 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3088 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3089 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3090 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3091 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3093 for (j = 0; j < ost->forced_kf_count; j++)
3094 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3096 enc_ctx->time_base);
3098 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3099 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3100 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3101 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3102 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3103 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3104 if (!strncmp(ost->enc->name, "libx264", 7) &&
3105 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3106 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3107 av_log(NULL, AV_LOG_WARNING,
3108 "No pixel format specified, %s for H.264 encoding chosen.\n"
3109 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3110 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3111 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3112 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3113 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3114 av_log(NULL, AV_LOG_WARNING,
3115 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3116 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3117 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3118 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3120 ost->st->avg_frame_rate = ost->frame_rate;
3123 enc_ctx->width != dec_ctx->width ||
3124 enc_ctx->height != dec_ctx->height ||
3125 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3126 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3129 if (ost->forced_keyframes) {
3130 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3131 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3132 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3134 av_log(NULL, AV_LOG_ERROR,
3135 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3138 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3139 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3140 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3141 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3143 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3144 // parse it only for static kf timings
3145 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3146 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3150 case AVMEDIA_TYPE_SUBTITLE:
3151 enc_ctx->time_base = (AVRational){1, 1000};
3152 if (!enc_ctx->width) {
3153 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3154 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3157 case AVMEDIA_TYPE_DATA:
3165 if (ost->disposition) {
3166 static const AVOption opts[] = {
3167 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3168 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3169 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3170 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3171 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3172 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3173 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3174 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3175 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3176 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3177 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3178 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3179 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3180 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3183 static const AVClass class = {
3185 .item_name = av_default_item_name,
3187 .version = LIBAVUTIL_VERSION_INT,
3189 const AVClass *pclass = &class;
3191 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3197 /* open each encoder */
3198 for (i = 0; i < nb_output_streams; i++) {
3199 ret = init_output_stream(output_streams[i], error, sizeof(error));
3204 /* init input streams */
3205 for (i = 0; i < nb_input_streams; i++)
3206 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3207 for (i = 0; i < nb_output_streams; i++) {
3208 ost = output_streams[i];
3209 avcodec_close(ost->enc_ctx);
3214 /* discard unused programs */
3215 for (i = 0; i < nb_input_files; i++) {
3216 InputFile *ifile = input_files[i];
3217 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3218 AVProgram *p = ifile->ctx->programs[j];
3219 int discard = AVDISCARD_ALL;
3221 for (k = 0; k < p->nb_stream_indexes; k++)
3222 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3223 discard = AVDISCARD_DEFAULT;
3226 p->discard = discard;
3230 /* open files and write file headers */
3231 for (i = 0; i < nb_output_files; i++) {
3232 oc = output_files[i]->ctx;
3233 oc->interrupt_callback = int_cb;
3234 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3235 snprintf(error, sizeof(error),
3236 "Could not write header for output file #%d "
3237 "(incorrect codec parameters ?): %s",
3238 i, av_err2str(ret));
3239 ret = AVERROR(EINVAL);
3242 // assert_avoptions(output_files[i]->opts);
3243 if (strcmp(oc->oformat->name, "rtp")) {
3249 /* dump the file output parameters - cannot be done before in case
3251 for (i = 0; i < nb_output_files; i++) {
3252 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3255 /* dump the stream mapping */
3256 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3257 for (i = 0; i < nb_input_streams; i++) {
3258 ist = input_streams[i];
3260 for (j = 0; j < ist->nb_filters; j++) {
3261 if (ist->filters[j]->graph->graph_desc) {
3262 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3263 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3264 ist->filters[j]->name);
3265 if (nb_filtergraphs > 1)
3266 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3267 av_log(NULL, AV_LOG_INFO, "\n");
3272 for (i = 0; i < nb_output_streams; i++) {
3273 ost = output_streams[i];
3275 if (ost->attachment_filename) {
3276 /* an attached file */
3277 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3278 ost->attachment_filename, ost->file_index, ost->index);
3282 if (ost->filter && ost->filter->graph->graph_desc) {
3283 /* output from a complex graph */
3284 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3285 if (nb_filtergraphs > 1)
3286 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3288 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3289 ost->index, ost->enc ? ost->enc->name : "?");
3293 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3294 input_streams[ost->source_index]->file_index,
3295 input_streams[ost->source_index]->st->index,
3298 if (ost->sync_ist != input_streams[ost->source_index])
3299 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3300 ost->sync_ist->file_index,
3301 ost->sync_ist->st->index);
3302 if (ost->stream_copy)
3303 av_log(NULL, AV_LOG_INFO, " (copy)");
3305 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3306 const AVCodec *out_codec = ost->enc;
3307 const char *decoder_name = "?";
3308 const char *in_codec_name = "?";
3309 const char *encoder_name = "?";
3310 const char *out_codec_name = "?";
3311 const AVCodecDescriptor *desc;
3314 decoder_name = in_codec->name;
3315 desc = avcodec_descriptor_get(in_codec->id);
3317 in_codec_name = desc->name;
3318 if (!strcmp(decoder_name, in_codec_name))
3319 decoder_name = "native";
3323 encoder_name = out_codec->name;
3324 desc = avcodec_descriptor_get(out_codec->id);
3326 out_codec_name = desc->name;
3327 if (!strcmp(encoder_name, out_codec_name))
3328 encoder_name = "native";
3331 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3332 in_codec_name, decoder_name,
3333 out_codec_name, encoder_name);
3335 av_log(NULL, AV_LOG_INFO, "\n");
3339 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3343 if (sdp_filename || want_sdp) {
3347 transcode_init_done = 1;
3352 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3353 static int need_output(void)
3357 for (i = 0; i < nb_output_streams; i++) {
3358 OutputStream *ost = output_streams[i];
3359 OutputFile *of = output_files[ost->file_index];
3360 AVFormatContext *os = output_files[ost->file_index]->ctx;
3362 if (ost->finished ||
3363 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3365 if (ost->frame_number >= ost->max_frames) {
3367 for (j = 0; j < of->ctx->nb_streams; j++)
3368 close_output_stream(output_streams[of->ost_index + j]);
3379 * Select the output stream to process.
3381 * @return selected output stream, or NULL if none available
3383 static OutputStream *choose_output(void)
3386 int64_t opts_min = INT64_MAX;
3387 OutputStream *ost_min = NULL;
3389 for (i = 0; i < nb_output_streams; i++) {
3390 OutputStream *ost = output_streams[i];
3391 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3392 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3394 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3395 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3397 if (!ost->finished && opts < opts_min) {
3399 ost_min = ost->unavailable ? NULL : ost;
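/* Enable or disable echoing of typed characters on the controlling terminal. */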
3405 static void set_tty_echo(int on)
3409 if (tcgetattr(0, &tty) == 0) {
3410 if (on) tty.c_lflag |= ECHO;
3411 else tty.c_lflag &= ~ECHO;
3412 tcsetattr(0, TCSANOW, &tty);
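/* Poll for an interactive key press and handle it (quit, verbosity, debug,
 * filter commands, ...); returns AVERROR_EXIT when the user asked to stop. */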
3417 static int check_keyboard_interaction(int64_t cur_time)
3420 static int64_t last_time;
3421 if (received_nb_signals)
3422 return AVERROR_EXIT;
3423 /* read_key() returns 0 on EOF */
3424 if(cur_time - last_time >= 100000 && !run_as_daemon){
3426 last_time = cur_time;
3430 return AVERROR_EXIT;
3431 if (key == '+') av_log_set_level(av_log_get_level()+10);
3432 if (key == '-') av_log_set_level(av_log_get_level()-10);
3433 if (key == 's') qp_hist ^= 1;
3436 do_hex_dump = do_pkt_dump = 0;
3437 } else if(do_pkt_dump){
3441 av_log_set_level(AV_LOG_DEBUG);
3443 if (key == 'c' || key == 'C'){
3444 char buf[4096], target[64], command[256], arg[256] = {0};
3447 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3450 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3455 fprintf(stderr, "\n");
3457 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3458 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3459 target, time, command, arg);
3460 for (i = 0; i < nb_filtergraphs; i++) {
3461 FilterGraph *fg = filtergraphs[i];
3464 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3465 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3466 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3467 } else if (key == 'c') {
3468 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3469 ret = AVERROR_PATCHWELCOME;
3471 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3473 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3478 av_log(NULL, AV_LOG_ERROR,
3479 "Parse error, at least 3 arguments were expected, "
3480 "only %d given in string '%s'\n", n, buf);
3483 if (key == 'd' || key == 'D'){
3486 debug = input_streams[0]->st->codec->debug<<1;
3487 if(!debug) debug = 1;
3488 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3495 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3500 fprintf(stderr, "\n");
3501 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3502 fprintf(stderr,"error parsing debug value\n");
3504 for(i=0;i<nb_input_streams;i++) {
3505 input_streams[i]->st->codec->debug = debug;
3507 for(i=0;i<nb_output_streams;i++) {
3508 OutputStream *ost = output_streams[i];
3509 ost->enc_ctx->debug = debug;
3511 if(debug) av_log_set_level(AV_LOG_DEBUG);
3512 fprintf(stderr,"debug=%d\n", debug);
3515 fprintf(stderr, "key function\n"
3516 "? show this help\n"
3517 "+ increase verbosity\n"
3518 "- decrease verbosity\n"
3519 "c Send command to first matching filter supporting it\n"
3520 "C Send/Que command to all matching filters\n"
3521 "D cycle through available debug modes\n"
3522 "h dump packets/hex press to cycle through the 3 states\n"
3524 "s Show QP histogram\n"
3531 static void *input_thread(void *arg)
3534 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3539 ret = av_read_frame(f->ctx, &pkt);
3541 if (ret == AVERROR(EAGAIN)) {
3546 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3549 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3550 if (flags && ret == AVERROR(EAGAIN)) {
3552 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3553 av_log(f->ctx, AV_LOG_WARNING,
3554 "Thread message queue blocking; consider raising the "
3555 "thread_queue_size option (current value: %d)\n",
3556 f->thread_queue_size);
3559 if (ret != AVERROR_EOF)
3560 av_log(f->ctx, AV_LOG_ERROR,
3561 "Unable to send packet to main thread: %s\n",
3563 av_packet_unref(&pkt);
3564 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3572 static void free_input_threads(void)
3576 for (i = 0; i < nb_input_files; i++) {
3577 InputFile *f = input_files[i];
3580 if (!f || !f->in_thread_queue)
3582 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3583 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3584 av_packet_unref(&pkt);
3586 pthread_join(f->thread, NULL);
3588 av_thread_message_queue_free(&f->in_thread_queue);
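/* Start one demuxer thread per input file (only used when more than one
 * input is open). */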
3592 static int init_input_threads(void)
3596 if (nb_input_files == 1)
3599 for (i = 0; i < nb_input_files; i++) {
3600 InputFile *f = input_files[i];
3602 if (f->ctx->pb ? !f->ctx->pb->seekable :
3603 strcmp(f->ctx->iformat->name, "lavfi"))
3604 f->non_blocking = 1;
3605 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3606 f->thread_queue_size, sizeof(AVPacket));
3610 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3611 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3612 av_thread_message_queue_free(&f->in_thread_queue);
3613 return AVERROR(ret);
3619 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3621 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3623 AV_THREAD_MESSAGE_NONBLOCK : 0);
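/* Fetch the next packet from an input file, honouring -re rate emulation and
 * going through the input thread queue when several inputs are open. */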
3627 static int get_input_packet(InputFile *f, AVPacket *pkt)
3631 for (i = 0; i < f->nb_streams; i++) {
3632 InputStream *ist = input_streams[f->ist_index + i];
3633 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3634 int64_t now = av_gettime_relative() - ist->start;
3636 return AVERROR(EAGAIN);
3641 if (nb_input_files > 1)
3642 return get_input_packet_mt(f, pkt);
3644 return av_read_frame(f->ctx, pkt);
3647 static int got_eagain(void)
3650 for (i = 0; i < nb_output_streams; i++)
3651 if (output_streams[i]->unavailable)
3656 static void reset_eagain(void)
3659 for (i = 0; i < nb_input_files; i++)
3660 input_files[i]->eagain = 0;
3661 for (i = 0; i < nb_output_streams; i++)
3662 output_streams[i]->unavailable = 0;
3665 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3666 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3667 AVRational time_base)
3673 return tmp_time_base;
3676 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3679 return tmp_time_base;
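/* Seek an input file back to its start for -stream_loop and estimate the
 * duration of the iteration that just ended, so that timestamps of the next
 * iteration can be offset accordingly. */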
3685 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3688 AVCodecContext *avctx;
3689 int i, ret, has_audio = 0;
3690 int64_t duration = 0;
3692 ret = av_seek_frame(is, -1, is->start_time, 0);
3696 for (i = 0; i < ifile->nb_streams; i++) {
3697 ist = input_streams[ifile->ist_index + i];
3698 avctx = ist->dec_ctx;
3701 if (ist->decoding_needed) {
3702 process_input_packet(ist, NULL, 1);
3703 avcodec_flush_buffers(avctx);
3706 /* duration is the length of the last frame in a stream;
3707 * when an audio stream is present we don't care about the
3708 * last video frame's length because it is not defined exactly */
3709 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3713 for (i = 0; i < ifile->nb_streams; i++) {
3714 ist = input_streams[ifile->ist_index + i];
3715 avctx = ist->dec_ctx;
3718 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3719 AVRational sample_rate = {1, avctx->sample_rate};
3721 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3725 if (ist->framerate.num) {
3726 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3727 } else if (ist->st->avg_frame_rate.num) {
3728 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3729 } else duration = 1;
3731 if (!ifile->duration)
3732 ifile->time_base = ist->st->time_base;
3733 /* the total duration of the stream: max_pts - min_pts is
3734 * the duration of the stream without the last frame */
3735 duration += ist->max_pts - ist->min_pts;
3736 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3740 if (ifile->loop > 0)
3748 * - 0 -- one packet was read and processed
3749 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3750 * this function should be called again
3751 * - AVERROR_EOF -- this function should not be called again
3753 static int process_input(int file_index)
3755 InputFile *ifile = input_files[file_index];
3756 AVFormatContext *is;
3764 ret = get_input_packet(ifile, &pkt);
3766 if (ret == AVERROR(EAGAIN)) {
3770 if (ret < 0 && ifile->loop) {
3771 if ((ret = seek_to_start(ifile, is)) < 0)
3773 ret = get_input_packet(ifile, &pkt);
3776 if (ret != AVERROR_EOF) {
3777 print_error(is->filename, ret);
3782 for (i = 0; i < ifile->nb_streams; i++) {
3783 ist = input_streams[ifile->ist_index + i];
3784 if (ist->decoding_needed) {
3785 ret = process_input_packet(ist, NULL, 0);
3790 /* mark all outputs that don't go through lavfi as finished */
3791 for (j = 0; j < nb_output_streams; j++) {
3792 OutputStream *ost = output_streams[j];
3794 if (ost->source_index == ifile->ist_index + i &&
3795 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3796 finish_output_stream(ost);
3800 ifile->eof_reached = 1;
3801 return AVERROR(EAGAIN);
3807 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3808 is->streams[pkt.stream_index]);
3810 /* the following test is needed in case new streams appear
3811 dynamically in the stream: we ignore them */
3812 if (pkt.stream_index >= ifile->nb_streams) {
3813 report_new_stream(file_index, &pkt);
3814 goto discard_packet;
3817 ist = input_streams[ifile->ist_index + pkt.stream_index];
3819 ist->data_size += pkt.size;
3823 goto discard_packet;
3825 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3826 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3831 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3832 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3833 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3834 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3835 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3836 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3837 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3838 av_ts2str(input_files[ist->file_index]->ts_offset),
3839 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3842 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3843 int64_t stime, stime2;
3844 // Correcting starttime based on the enabled streams
3845 // FIXME this ideally should be done before the first use of starttime, but we do not know which are the enabled streams at that point,
3846 // so we instead do it here as part of discontinuity handling
3847 if ( ist->next_dts == AV_NOPTS_VALUE
3848 && ifile->ts_offset == -is->start_time
3849 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3850 int64_t new_start_time = INT64_MAX;
3851 for (i=0; i<is->nb_streams; i++) {
3852 AVStream *st = is->streams[i];
3853 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3855 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3857 if (new_start_time > is->start_time) {
3858 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3859 ifile->ts_offset = -new_start_time;
3863 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3864 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3865 ist->wrap_correction_done = 1;
3867 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3868 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3869 ist->wrap_correction_done = 0;
3871 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3872 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3873 ist->wrap_correction_done = 0;
3877 /* add the stream-global side data to the first packet */
3878 if (ist->nb_packets == 1) {
3879 if (ist->st->nb_side_data)
3880 av_packet_split_side_data(&pkt);
3881 for (i = 0; i < ist->st->nb_side_data; i++) {
3882 AVPacketSideData *src_sd = &ist->st->side_data[i];
3885 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3887 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3890 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3894 memcpy(dst_data, src_sd->data, src_sd->size);
3898 if (pkt.dts != AV_NOPTS_VALUE)
3899 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3900 if (pkt.pts != AV_NOPTS_VALUE)
3901 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3903 if (pkt.pts != AV_NOPTS_VALUE)
3904 pkt.pts *= ist->ts_scale;
3905 if (pkt.dts != AV_NOPTS_VALUE)
3906 pkt.dts *= ist->ts_scale;
3908 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
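/* On formats prone to timestamp discontinuities, if this stream has not produced
 * any timestamps yet, check its dts against the last timestamp seen in the file
 * and fold any large jump into the file's ts_offset. */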
3909 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3910 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3911 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3912 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3913 int64_t delta = pkt_dts - ifile->last_ts;
3914 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3915 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3916 ifile->ts_offset -= delta;
3917 av_log(NULL, AV_LOG_DEBUG,
3918 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3919 delta, ifile->ts_offset);
3920 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3921 if (pkt.pts != AV_NOPTS_VALUE)
3922 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3926 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3927 if (pkt.pts != AV_NOPTS_VALUE) {
3928 pkt.pts += duration;
3929 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3930 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3933 if (pkt.dts != AV_NOPTS_VALUE)
3934 pkt.dts += duration;
3936 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3937 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3938 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3939 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3941 int64_t delta = pkt_dts - ist->next_dts;
3942 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3943 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3944 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3945 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3946 ifile->ts_offset -= delta;
3947 av_log(NULL, AV_LOG_DEBUG,
3948 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3949 delta, ifile->ts_offset);
3950 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3951 if (pkt.pts != AV_NOPTS_VALUE)
3952 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3955 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3956 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3957 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3958 pkt.dts = AV_NOPTS_VALUE;
3960 if (pkt.pts != AV_NOPTS_VALUE){
3961 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3962 delta = pkt_pts - ist->next_dts;
3963 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3964 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3965 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3966 pkt.pts = AV_NOPTS_VALUE;
3972 if (pkt.dts != AV_NOPTS_VALUE)
3973 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3976 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3977 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3978 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3979 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3980 av_ts2str(input_files[ist->file_index]->ts_offset),
3981 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3984 sub2video_heartbeat(ist, pkt.pts);
3986 process_input_packet(ist, &pkt, 0);
3989 av_packet_unref(&pkt);
3995 * Perform a step of transcoding for the specified filter graph.
3997 * @param[in] graph filter graph to consider
3998 * @param[out] best_ist input stream where a frame would allow processing to continue
3999 * @return 0 for success, <0 for error
4001 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4004 int nb_requests, nb_requests_max = 0;
4005 InputFilter *ifilter;
4009 ret = avfilter_graph_request_oldest(graph->graph);
4011 return reap_filters(0);
4013 if (ret == AVERROR_EOF) {
4014 ret = reap_filters(1);
4015 for (i = 0; i < graph->nb_outputs; i++)
4016 close_output_stream(graph->outputs[i]->ost);
4019 if (ret != AVERROR(EAGAIN))
4022 for (i = 0; i < graph->nb_inputs; i++) {
4023 ifilter = graph->inputs[i];
4025 if (input_files[ist->file_index]->eagain ||
4026 input_files[ist->file_index]->eof_reached)
4028 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4029 if (nb_requests > nb_requests_max) {
4030 nb_requests_max = nb_requests;
4036 for (i = 0; i < graph->nb_outputs; i++)
4037 graph->outputs[i]->ost->unavailable = 1;
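/*
 * Illustrative sketch of how a caller can drive transcode_from_filter(): if
 * it reports a starving input stream, feed that stream's file; otherwise
 * there is nothing to do for now.  The helper name is hypothetical and only
 * mirrors the pattern used by transcode_step() below.
 */
static int feed_graph_sketch(FilterGraph *graph)
{
    InputStream *ist = NULL;
    int ret = transcode_from_filter(graph, &ist);

    if (ret < 0)
        return ret;
    if (ist)
        return process_input(ist->file_index);
    return 0;
}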
4043 * Run a single step of transcoding.
4045 * @return 0 for success, <0 for error
4047 static int transcode_step(void)
4053 ost = choose_output();
4060 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4065 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4070 av_assert0(ost->source_index >= 0);
4071 ist = input_streams[ost->source_index];
4074 ret = process_input(ist->file_index);
4075 if (ret == AVERROR(EAGAIN)) {
4076 if (input_files[ist->file_index]->eagain)
4077 ost->unavailable = 1;
4082 return ret == AVERROR_EOF ? 0 : ret;
4084 return reap_filters(0);
4088 * The following code is the main loop of the file converter
4090 static int transcode(void)
4093 AVFormatContext *os;
4096 int64_t timer_start;
4097 int64_t total_packets_written = 0;
4099 ret = transcode_init();
4103 if (stdin_interaction) {
4104 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4107 timer_start = av_gettime_relative();
4110 if ((ret = init_input_threads()) < 0)
4114 while (!received_sigterm) {
4115 int64_t cur_time = av_gettime_relative();
4117 /* if 'q' was pressed, exit */
4118 if (stdin_interaction)
4119 if (check_keyboard_interaction(cur_time) < 0)
4122 /* check if there's any stream where output is still needed */
4123 if (!need_output()) {
4124 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4128 ret = transcode_step();
4129 if (ret < 0 && ret != AVERROR_EOF) {
4131 av_strerror(ret, errbuf, sizeof(errbuf));
4133 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4137 /* dump report using the first video and audio output streams */
4138 print_report(0, timer_start, cur_time);
4141 free_input_threads();
4144 /* at the end of stream, we must flush the decoder buffers */
4145 for (i = 0; i < nb_input_streams; i++) {
4146 ist = input_streams[i];
4147 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4148 process_input_packet(ist, NULL, 0);
4155 /* write the trailer if needed and close file */
4156 for (i = 0; i < nb_output_files; i++) {
4157 os = output_files[i]->ctx;
4158 if ((ret = av_write_trailer(os)) < 0) {
4159 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4165 /* dump the final report using the first video and audio streams */
4166 print_report(1, timer_start, av_gettime_relative());
4168 /* close each encoder */
4169 for (i = 0; i < nb_output_streams; i++) {
4170 ost = output_streams[i];
4171 if (ost->encoding_needed) {
4172 av_freep(&ost->enc_ctx->stats_in);
4174 total_packets_written += ost->packets_written;
4177 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4178 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4182 /* close each decoder */
4183 for (i = 0; i < nb_input_streams; i++) {
4184 ist = input_streams[i];
4185 if (ist->decoding_needed) {
4186 avcodec_close(ist->dec_ctx);
4187 if (ist->hwaccel_uninit)
4188 ist->hwaccel_uninit(ist->dec_ctx);
4197 free_input_threads();
4200 if (output_streams) {
4201 for (i = 0; i < nb_output_streams; i++) {
4202 ost = output_streams[i];
4205 if (fclose(ost->logfile))
4206 av_log(NULL, AV_LOG_ERROR,
4207 "Error closing logfile, loss of information possible: %s\n",
4208 av_err2str(AVERROR(errno)));
4209 ost->logfile = NULL;
4211 av_freep(&ost->forced_kf_pts);
4212 av_freep(&ost->apad);
4213 av_freep(&ost->disposition);
4214 av_dict_free(&ost->encoder_opts);
4215 av_dict_free(&ost->sws_dict);
4216 av_dict_free(&ost->swr_opts);
4217 av_dict_free(&ost->resample_opts);
4225 static int64_t getutime(void)
4228 struct rusage rusage;
4230 getrusage(RUSAGE_SELF, &rusage);
4231 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4232 #elif HAVE_GETPROCESSTIMES
4234 FILETIME c, e, k, u;
4235 proc = GetCurrentProcess();
4236 GetProcessTimes(proc, &c, &e, &k, &u);
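/* user time comes back in 100 ns FILETIME units; divide by 10 for microseconds */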
4237 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4239 return av_gettime_relative();
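/*
 * Illustrative sketch: the "bench: utime" line printed when benchmarking is
 * requested is simply the difference of two getutime() samples, in
 * microseconds, converted to seconds.  The helper name is hypothetical.
 */
static void report_utime_sketch(int64_t start_us)
{
    int64_t elapsed_us = getutime() - start_us;
    av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", elapsed_us / 1000000.0);
}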
4243 static int64_t getmaxrss(void)
4245 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4246 struct rusage rusage;
4247 getrusage(RUSAGE_SELF, &rusage);
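/* ru_maxrss is reported in kilobytes (on Linux); scale to bytes */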
4248 return (int64_t)rusage.ru_maxrss * 1024;
4249 #elif HAVE_GETPROCESSMEMORYINFO
4251 PROCESS_MEMORY_COUNTERS memcounters;
4252 proc = GetCurrentProcess();
4253 memcounters.cb = sizeof(memcounters);
4254 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4255 return memcounters.PeakPagefileUsage;
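/*
 * Illustrative sketch: getmaxrss() reports peak memory use in bytes; callers
 * typically divide it down to kilobytes for display, as in the benchmark
 * output.  The helper name is hypothetical.
 */
static void report_maxrss_sketch(void)
{
    av_log(NULL, AV_LOG_INFO, "Maximum process memory: %"PRId64"kB\n",
           getmaxrss() / 1024);
}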
4261 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4265 int main(int argc, char **argv)
4270 register_exit(ffmpeg_cleanup);
4272 setvbuf(stderr, NULL, _IONBF, 0); /* win32 runtime needs this */
4274 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4275 parse_loglevel(argc, argv, options);
4277 if (argc > 1 && !strcmp(argv[1], "-d")) {
4279 av_log_set_callback(log_callback_null);
4284 avcodec_register_all();
4286 avdevice_register_all();
4288 avfilter_register_all();
4290 avformat_network_init();
4292 show_banner(argc, argv, options);
4296 /* parse options and open all input/output files */
4297 ret = ffmpeg_parse_options(argc, argv);
4301 if (nb_output_files <= 0 && nb_input_files == 0) {
4303 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4307 /* file converter / grab */
4308 if (nb_output_files <= 0) {
4309 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4313 // if (nb_input_files == 0) {
4314 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4318 current_time = ti = getutime();
4319 if (transcode() < 0)
4321 ti = getutime() - ti;
4323 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4325 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4326 decode_error_stat[0], decode_error_stat[1]);
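/* The check below rearranges "errors / (decoded + errors) > max_error_rate"
 * to avoid a division: e.g. with 97 good frames, 3 errors and a
 * -max_error_rate of 0.02, 100 * 0.02 = 2 < 3, so the run is treated as a
 * failure (numbers here are only an illustration). */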
4327 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4330 exit_program(received_nb_signals ? 255 : main_return_code);
4331 return main_return_code;