2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Opened lazily by do_video_stats() when -vstats/-vstats_file is used. */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions
   (initializer elided in this excerpt). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Runtime flags and statistics counters. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
/* NOTE(review): meaning of the two slots inferred from the name only
   (success/error decode counts?) — confirm at the use sites. */
130 static int64_t decode_error_stat[2];
/* Reference timestamp for update_benchmark() deltas.
   NOTE(review): declared int but compared against int64_t getutime()
   results elsewhere — possible truncation; confirm upstream. */
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (see do_subtitle_out). */
135 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static, so presumably shared with the
   option/filter translation units — confirm against ffmpeg.h. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
/* Nonzero once oldtty holds settings that must be restored on exit. */
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/*
 * Reset ist->sub2video.frame to a blank RGB32 canvas, sized from the
 * decoder when it reports dimensions, else from the configured sub2video
 * size.  Returns <0 on buffer-allocation failure.
 * NOTE(review): declarations/returns/braces appear elided in this excerpt.
 */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero fill => fully transparent canvas in RGB32 */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/*
 * Blit one bitmap subtitle rectangle onto the RGB32 canvas through its
 * palette.  dst/dst_linesize describe the canvas, w/h its bounds.
 * Non-bitmap rectangles and rectangles overflowing the canvas are
 * rejected with a warning.  (Rect parameter line elided in this excerpt.)
 */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
/* pict.data[1] is the palette: one 32-bit color per source index */
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
/* palette lookup per pixel */
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/*
 * Stamp the canvas frame with pts and push it (by reference) into every
 * filter fed by this input stream; sub2video.last_pts records what was
 * last sent.
 */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* a canvas must already have been allocated (sub2video_get_blank_frame) */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the same frame is re-sent on later heartbeats */
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/*
 * Render an AVSubtitle onto a fresh blank canvas and push it downstream.
 * The display window [pts, end_pts] is derived from the subtitle's
 * start/end display times (milliseconds).  With sub == NULL (heartbeat or
 * flush path) a blank canvas is sent to clear the previous subtitle.
 */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* subtitle times are in ms relative to sub->pts (AV_TIME_BASE units) */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* NULL subtitle: re-use the previous end time for the blank canvas */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* composite every rectangle, then push the finished canvas */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/*
 * Keep sibling sub2video streams alive: re-send the current canvas so a
 * filter (e.g. overlay) waiting on a subtitle input does not stall the
 * video frames read from the same file.
 */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams without a sub2video canvas */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* current subtitle expired (or no canvas yet): send a blank one */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* count outstanding frame requests across the stream's filters */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/*
 * End of stream: clear any still-displayed subtitle with a blank canvas,
 * then signal EOF (NULL frame) to every filter fed by this stream.
 */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/*
 * Async-signal-safe terminal teardown: restore the saved tty attributes
 * (tcsetattr is on the POSIX async-signal-safe list).
 */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
/* NOTE(review): presumably resets av_log's internal line state (prints
   nothing at LOG_QUIET) — confirm against av_log internals. */
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* State shared between the signal handlers and the main loop.
   NOTE(review): plain volatile ints, not C11 atomics — adequate for the
   flag-style reads visible here, but not a synchronization guarantee. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
/* process exit code, set on mux errors (see write_frame) */
318 static int main_return_code = 0;
/*
 * Signal handler: record which signal arrived and how many signals we
 * have seen; after the 4th, report via a raw write(2) (async-signal-safe,
 * unlike stdio/av_log) — the hard-exit path itself is elided here.
 */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/*
 * Windows console control handler: map console events onto the POSIX
 * sigterm_handler so the shutdown logic stays in one place.
 */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl-C / Ctrl-Break behave like SIGINT */
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init() (the definition line is elided in this
   excerpt): put the controlling tty into a raw-ish mode so single
   keypresses can be read, then install signal handlers. */
371 if (tcgetattr (0, &tty) == 0) {
/* disable input translation (break, CR/NL mapping, flow control) */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
/* no echo, no canonical line buffering -> per-keypress reads */
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* delivered when the CPU rlimit is exceeded */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* POSIX path polls stdin with select(); Windows path peeks the console
   or pipe.  Heavily fragmented in this excerpt — documented as-is. */
401 static int read_key(void)
/* fd 0 only; tv presumably a zero timeout for a non-blocking poll —
   TODO confirm, the timeval setup is elided */
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails on non-console handles => stdin is a pipe */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/*
 * AVIO interrupt callback: abort blocking I/O once more signals have
 * been received than init completions (i.e. the user interrupted us
 * during or after initialization).
 */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
/* registered with lavf contexts so long network/file ops stay abortable */
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/*
 * Global teardown (registered as the exit handler): free filter graphs,
 * output files/streams, input files/streams, the vstats file and the
 * global tables, then report how the run ended.
 * NOTE(review): heavily elided in this excerpt (several loop/if closing
 * braces are missing); documented as-is.
 */
459 static void ffmpeg_cleanup(int ret)
/* NOTE(review): getmaxrss() is declared int64_t above — assigning to
   int may truncate for very large RSS; worth confirming upstream. */
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free every filter graph and its input/output descriptions */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* close and free output files (avio close path elided here) */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
/* walk and close the bitstream filter chain */
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
/* stop demux threads before tearing down input state */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
/* fclose() reports flush errors; losing vstats data is non-fatal */
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
/* release the global tables themselves */
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/*
 * Delete from *a every option key that also appears in b (case-sensitive
 * match) — used to drop options that were already consumed elsewhere.
 */
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
/* "" + IGNORE_SUFFIX iterates every entry of b */
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/*
 * Log a FATAL error if any user-supplied option in m was never consumed
 * (first leftover entry is reported; exit path elided in this excerpt).
 */
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Fatal-error helper for experimental codecs used without opt-in
   (body elided in this excerpt). */
604 static void abort_codec_experimental(AVCodec *c, int encoder)
/*
 * With -benchmark_all, log the user-time delta since the previous call,
 * labelled by the printf-style fmt.  NOTE(review): the fmt == NULL path
 * and the current_time update are elided here — presumably NULL only
 * refreshes the reference timestamp; confirm upstream.
 */
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/*
 * Mark every output stream finished: `this_stream` flags are OR-ed into
 * ost itself, `others` into all remaining streams.
 */
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/*
 * Final packet path to the muxer: propagate encoder extradata, apply
 * sync-method timestamp policy, enforce -frames limits, collect quality
 * side data, run bitstream filters, sanitize DTS monotonicity, then
 * interleave-write.  On write failure the whole run is marked failed and
 * all streams are closed.  Consumes (unrefs) pkt on every path shown.
 */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* stream-copy uses the muxer stream's codec context, encoding the encoder's */
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* lazily copy extradata from the encoder to the muxer-visible context */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: discard timestamps entirely */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
/* pull per-frame quality/error stats out of the packet side data */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration is dictated by the frame rate */
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
/* timestamp sanitation, skipped for timestamp-less muxers */
698 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
699 if (pkt->dts != AV_NOPTS_VALUE &&
700 pkt->pts != AV_NOPTS_VALUE &&
701 pkt->dts > pkt->pts) {
702 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
704 ost->file_index, ost->st->index);
/* replace dts by the median of {pts, dts, last_mux_dts+1} */
706 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
707 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
708 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
711 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
712 pkt->dts != AV_NOPTS_VALUE &&
713 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing dts, others only non-decreasing */
714 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
715 if (pkt->dts < max) {
716 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
717 av_log(s, loglevel, "Non-monotonous DTS in output stream "
718 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
719 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
721 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
724 av_log(s, loglevel, "changing to %"PRId64". This may result "
725 "in incorrect timestamps in the output file.\n",
/* keep pts >= dts after clamping dts up to max */
727 if(pkt->pts >= pkt->dts)
728 pkt->pts = FFMAX(pkt->pts, max);
733 ost->last_mux_dts = pkt->dts;
/* mux statistics */
735 ost->data_size += pkt->size;
736 ost->packets_written++;
738 pkt->stream_index = ost->index;
741 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
742 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
743 av_get_media_type_string(ost->enc_ctx->codec_type),
744 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
745 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
750 ret = av_interleaved_write_frame(s, pkt);
/* a mux failure poisons the whole run */
752 print_error("av_interleaved_write_frame()", ret);
753 main_return_code = 1;
754 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
756 av_packet_unref(pkt);
/*
 * Mark this stream's encoder finished and shrink the owning file's
 * recording time to this stream's current end (elapsed output duration
 * rescaled to AV_TIME_BASE) — the condition guarding the shrink is
 * elided in this excerpt.
 */
759 static void close_output_stream(OutputStream *ost)
761 OutputFile *of = output_files[ost->file_index];
763 ost->finished |= ENCODER_FINISHED;
765 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
766 of->recording_time = FFMIN(of->recording_time, end);
/*
 * Enforce -t: if the stream's elapsed output duration has reached the
 * file's recording_time, close the stream.  Return values are elided in
 * this excerpt (presumably 0 = stop encoding, 1 = keep going — confirm).
 */
770 static int check_recording_time(OutputStream *ost)
772 OutputFile *of = output_files[ost->file_index];
774 if (of->recording_time != INT64_MAX &&
775 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
776 AV_TIME_BASE_Q) >= 0) {
777 close_output_stream(ost);
/*
 * Encode one audio frame and hand the resulting packet to write_frame().
 * Maintains ost->sync_opts as the running sample counter in the
 * encoder's time base.  (Frame parameter and several braces elided.)
 */
783 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
786 AVCodecContext *enc = ost->enc_ctx;
790 av_init_packet(&pkt);
/* honor -t before feeding the encoder */
794 if (!check_recording_time(ost))
/* without usable pts (or with negative -async) continue the sample clock */
797 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
798 frame->pts = ost->sync_opts;
799 ost->sync_opts = frame->pts + frame->nb_samples;
800 ost->samples_encoded += frame->nb_samples;
801 ost->frames_encoded++;
803 av_assert0(pkt.size || !pkt.data);
804 update_benchmark(NULL);
806 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
807 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
808 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
809 enc->time_base.num, enc->time_base.den);
812 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
813 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
816 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* encoder time base -> muxer stream time base */
819 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
822 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
823 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
824 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
825 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
828 write_frame(s, &pkt, ost);
/*
 * Encode an AVSubtitle into one packet (two for DVB: draw + clear) and
 * send each to write_frame().  Uses the file-global subtitle_out scratch
 * buffer, allocated on first use.  (Parameters and braces partly elided.)
 */
832 static void do_subtitle_out(AVFormatContext *s,
837 int subtitle_out_max_size = 1024 * 1024;
838 int subtitle_out_size, nb, i;
843 if (sub->pts == AV_NOPTS_VALUE) {
844 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazy one-time allocation of the encode buffer */
853 subtitle_out = av_malloc(subtitle_out_max_size);
855 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
860 /* Note: DVB subtitle need one packet to draw them and one other
861 packet to clear them */
862 /* XXX: signal it in the codec context ? */
863 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
868 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
870 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
871 pts -= output_files[ost->file_index]->start_time;
872 for (i = 0; i < nb; i++) {
/* the encoder may clobber num_rects (clear packet); restore after */
873 unsigned save_num_rects = sub->num_rects;
875 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
876 if (!check_recording_time(ost))
880 // start_display_time is required to be 0
881 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
882 sub->end_display_time -= sub->start_display_time;
883 sub->start_display_time = 0;
887 ost->frames_encoded++;
889 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
890 subtitle_out_max_size, sub);
892 sub->num_rects = save_num_rects;
893 if (subtitle_out_size < 0) {
894 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
898 av_init_packet(&pkt);
899 pkt.data = subtitle_out;
900 pkt.size = subtitle_out_size;
901 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
/* display times are in ms -> stream time base */
902 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
903 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
904 /* XXX: the pts correction is handled here. Maybe handling
905 it in the codec would be better */
/* 90 = 90kHz MPEG-TS clock ticks per millisecond */
907 pkt.pts += 90 * sub->start_display_time;
909 pkt.pts += 90 * sub->end_display_time;
912 write_frame(s, &pkt, ost);
/*
 * Video output path: decide, per the active vsync mode, how many times
 * to emit (duplicate) or whether to drop the incoming frame, handle
 * forced keyframes, encode, and write.  next_picture == NULL flushes.
 * NOTE(review): many lines (braces, some cases/labels) are elided in
 * this excerpt; documented as-is.
 */
916 static void do_video_out(AVFormatContext *s,
918 AVFrame *next_picture,
921 int ret, format_video_sync;
923 AVCodecContext *enc = ost->enc_ctx;
/* muxer-visible codec context (field_order is set on it below) */
924 AVCodecContext *mux_enc = ost->st->codec;
925 int nb_frames, nb0_frames, i;
926 double delta, delta0;
929 InputStream *ist = NULL;
930 AVFilterContext *filter = ost->filter->filter;
932 if (ost->source_index >= 0)
933 ist = input_streams[ost->source_index];
/* frame duration in encoder ticks, preferring the filter's frame rate */
935 if (filter->inputs[0]->frame_rate.num > 0 &&
936 filter->inputs[0]->frame_rate.den > 0)
937 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
939 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
940 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
942 if (!ost->filters_script &&
946 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
947 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path (elided condition): predict dup count from recent history */
952 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
953 ost->last_nb0_frames[1],
954 ost->last_nb0_frames[2]);
956 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
957 delta = delta0 + duration;
959 /* by default, we output a single frame */
960 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* resolve -vsync auto based on the output format's capabilities */
963 format_video_sync = video_sync_method;
964 if (format_video_sync == VSYNC_AUTO) {
965 if(!strcmp(s->oformat->name, "avi")) {
966 format_video_sync = VSYNC_VFR;
968 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
970 && format_video_sync == VSYNC_CFR
971 && input_files[ist->file_index]->ctx->nb_streams == 1
972 && input_files[ist->file_index]->input_ts_offset == 0) {
973 format_video_sync = VSYNC_VSCFR;
975 if (format_video_sync == VSYNC_CFR && copy_ts) {
976 format_video_sync = VSYNC_VSCFR;
979 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* clip frames that land before the current output position */
983 format_video_sync != VSYNC_PASSTHROUGH &&
984 format_video_sync != VSYNC_DROP) {
986 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
988 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
989 sync_ipts = ost->sync_opts;
994 switch (format_video_sync) {
/* (VSCFR case label elided) skip initial padding instead of duplicating */
996 if (ost->frame_number == 0 && delta0 >= 0.5) {
997 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1000 ost->sync_opts = lrint(sync_ipts);
/* (CFR case, fallthrough from VSCFR) drop/duplicate to hold the rate */
1003 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1004 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1006 } else if (delta < -1.1)
1008 else if (delta > 1.1) {
1009 nb_frames = lrintf(delta);
1011 nb0_frames = lrintf(delta0 - 0.6);
/* (VFR case, partly elided) emit 0 or 1 frame based on delta */
1017 else if (delta > 0.6)
1018 ost->sync_opts = lrint(sync_ipts);
1021 case VSYNC_PASSTHROUGH:
1022 ost->sync_opts = lrint(sync_ipts);
/* never exceed -frames; dups of the previous frame are a subset */
1029 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1030 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the dup history window */
1032 memmove(ost->last_nb0_frames + 1,
1033 ost->last_nb0_frames,
1034 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1035 ost->last_nb0_frames[0] = nb0_frames;
1037 if (nb0_frames == 0 && ost->last_dropped) {
1039 av_log(NULL, AV_LOG_VERBOSE,
1040 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1041 ost->frame_number, ost->st->index, ost->last_frame->pts);
1043 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1044 if (nb_frames > dts_error_threshold * 30) {
1045 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1049 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1050 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1052 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1054 /* duplicates frame if needed */
1055 for (i = 0; i < nb_frames; i++) {
1056 AVFrame *in_picture;
1057 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the previous frame */
1061 if (i < nb0_frames && ost->last_frame) {
1062 in_picture = ost->last_frame;
1064 in_picture = next_picture;
1069 in_picture->pts = ost->sync_opts;
1072 if (!check_recording_time(ost))
1074 if (ost->frame_number >= ost->max_frames)
1078 #if FF_API_LAVF_FMT_RAWPICTURE
1079 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1080 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1081 /* raw pictures are written as AVPicture structure to
1082 avoid any copies. We support temporarily the older
1084 if (in_picture->interlaced_frame)
1085 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1087 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1088 pkt.data = (uint8_t *)in_picture;
1089 pkt.size = sizeof(AVPicture);
1090 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1091 pkt.flags |= AV_PKT_FLAG_KEY;
1093 write_frame(s, &pkt, ost);
/* normal encode path */
1097 int got_packet, forced_keyframe = 0;
1100 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1101 ost->top_field_first >= 0)
1102 in_picture->top_field_first = !!ost->top_field_first;
1104 if (in_picture->interlaced_frame) {
1105 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1106 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1108 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1110 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1112 in_picture->quality = enc->global_quality;
/* let the encoder pick the picture type unless forced below */
1113 in_picture->pict_type = 0;
1115 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1116 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* forced keyframes: explicit pts list ... */
1117 if (ost->forced_kf_index < ost->forced_kf_count &&
1118 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1119 ost->forced_kf_index++;
1120 forced_keyframe = 1;
/* ... or a -force_key_frames expression ... */
1121 } else if (ost->forced_keyframes_pexpr) {
1123 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1124 res = av_expr_eval(ost->forced_keyframes_pexpr,
1125 ost->forced_keyframes_expr_const_values, NULL);
1126 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1127 ost->forced_keyframes_expr_const_values[FKF_N],
1128 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1129 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1130 ost->forced_keyframes_expr_const_values[FKF_T],
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1134 forced_keyframe = 1;
1135 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1136 ost->forced_keyframes_expr_const_values[FKF_N];
1137 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1138 ost->forced_keyframes_expr_const_values[FKF_T];
1139 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1142 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
/* ... or "source": mirror keyframes of the input */
1143 } else if ( ost->forced_keyframes
1144 && !strncmp(ost->forced_keyframes, "source", 6)
1145 && in_picture->key_frame==1) {
1146 forced_keyframe = 1;
1149 if (forced_keyframe) {
1150 in_picture->pict_type = AV_PICTURE_TYPE_I;
1151 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1154 update_benchmark(NULL);
1156 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1157 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1158 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1159 enc->time_base.num, enc->time_base.den);
1162 ost->frames_encoded++;
1164 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1165 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1167 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1173 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1174 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1175 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1176 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* non-delaying encoders may omit pts; fall back to the sync counter */
1179 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1180 pkt.pts = ost->sync_opts;
1182 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1185 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1188 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1191 frame_size = pkt.size;
1192 write_frame(s, &pkt, ost);
1194 /* if two pass, output log */
1195 if (ost->logfile && enc->stats_out) {
1196 fprintf(ost->logfile, "%s", enc->stats_out);
1202 * For video, number of frames in == number of packets out.
1203 * But there may be reordering, so we can't throw away frames on encoder
1204 * flush, we need to limit them here, before they go into encoder.
1206 ost->frame_number++;
1208 if (vstats_filename && frame_size)
1209 do_video_stats(ost, frame_size);
/* remember the last frame for possible duplication next call */
1212 if (!ost->last_frame)
1213 ost->last_frame = av_frame_alloc();
1214 av_frame_unref(ost->last_frame);
1215 if (next_picture && ost->last_frame)
1216 av_frame_ref(ost->last_frame, next_picture);
1218 av_frame_free(&ost->last_frame);
1221 static double psnr(double d)
1223 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics (frame number, quantizer,
 * optional PSNR, sizes, bitrates, picture type) to the -vstats file,
 * opening it on first use.  (Error/exit paths elided in this excerpt.)
 */
1226 static void do_video_stats(OutputStream *ost, int frame_size)
1228 AVCodecContext *enc;
1230 double ti1, bitrate, avg_bitrate;
1232 /* this is executed just the first time do_video_stats is called */
1234 vstats_file = fopen(vstats_filename, "w");
1242 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1243 frame_number = ost->st->nb_frames;
/* quality is stored in lambda units; convert back to a QP-like scale */
1244 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1245 ost->quality / (float)FF_QP2LAMBDA);
/* error[0] is summed squared error; normalize per pixel over 255^2 */
1247 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1248 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1250 fprintf(vstats_file,"f_size= %6d ", frame_size);
1251 /* compute pts value */
1252 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous and running-average bitrates in kbit/s */
1256 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1257 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1258 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1259 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1260 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark this stream fully finished (encoder and muxer); the loop below
 * additionally finishes every stream of the owning file — its guarding
 * condition is elided in this excerpt (presumably -shortest; confirm).
 */
1264 static void finish_output_stream(OutputStream *ost)
1266 OutputFile *of = output_files[ost->file_index];
1269 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1272 for (i = 0; i < of->ctx->nb_streams; i++)
1273 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1278 * Get and encode new output from any of the filtergraphs, without causing
1281 * @return 0 for success, <0 for severe errors
1283 static int reap_filters(int flush)
1285 AVFrame *filtered_frame = NULL;
1288 /* Reap all buffers present in the buffer sinks */
1289 for (i = 0; i < nb_output_streams; i++) {
1290 OutputStream *ost = output_streams[i];
1291 OutputFile *of = output_files[ost->file_index];
1292 AVFilterContext *filter;
1293 AVCodecContext *enc = ost->enc_ctx;
1298 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to receive filtered data */
1300 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1301 return AVERROR(ENOMEM);
1303 filtered_frame = ost->filtered_frame;
1306 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* pull without requesting more input: NO_REQUEST means EAGAIN when the
 * sink simply has nothing buffered, which is not an error */
1307 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1308 AV_BUFFERSINK_FLAG_NO_REQUEST);
1310 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1311 av_log(NULL, AV_LOG_WARNING,
1312 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1313 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during a flush, push a NULL frame so the video path can emit
 * any final duplicated/buffered frames */
1314 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1315 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1319 if (ost->finished) {
1320 av_frame_unref(filtered_frame);
1323 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1324 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1325 AVRational tb = enc->time_base;
/* temporarily scale the timebase denominator up so the rescale below
 * keeps extra fractional precision, then divide it back out */
1326 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1328 tb.den <<= extra_bits;
1330 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1331 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1332 float_pts /= 1 << extra_bits;
1333 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1334 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1336 filtered_frame->pts =
1337 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1338 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1340 //if (ost->source_index >= 0)
1341 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1343 switch (filter->inputs[0]->type) {
1344 case AVMEDIA_TYPE_VIDEO:
/* only take the filter's SAR when the user did not force an aspect ratio */
1345 if (!ost->frame_aspect_ratio.num)
1346 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1349 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1350 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1352 enc->time_base.num, enc->time_base.den);
1355 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1357 case AVMEDIA_TYPE_AUDIO:
/* an encoder without PARAM_CHANGE support cannot follow a channel-count
 * change coming out of the filtergraph */
1358 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1359 enc->channels != av_frame_get_channels(filtered_frame)) {
1360 av_log(NULL, AV_LOG_ERROR,
1361 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1364 do_audio_out(of->ctx, ost, filtered_frame);
1367 // TODO support subtitle filters
1371 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * (at verbose level) per-input/per-output stream packet and frame counts.
 * total_size is the byte size of the first output file as reported by avio. */
1378 static void print_final_stats(int64_t total_size)
1380 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1381 uint64_t subtitle_size = 0;
1382 uint64_t data_size = 0;
1383 float percent = -1.0;
1387 for (i = 0; i < nb_output_streams; i++) {
1388 OutputStream *ost = output_streams[i];
1389 switch (ost->enc_ctx->codec_type) {
1390 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1391 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1392 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1393 default: other_size += ost->data_size; break;
1395 extra_size += ost->enc_ctx->extradata_size;
1396 data_size += ost->data_size;
/* NOTE(review): this mixes the deprecated CODEC_FLAG_PASS2 with the new
 * AV_CODEC_FLAG_PASS1 — should read AV_CODEC_FLAG_PASS2 for consistency;
 * the numeric values are identical so behavior is unaffected */
1397 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1398 != AV_CODEC_FLAG_PASS1)
/* overhead percentage is only meaningful when the container is not
 * smaller than the raw stream payload */
1402 if (data_size && total_size>0 && total_size >= data_size)
1403 percent = 100.0 * (total_size - data_size) / data_size;
1405 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1406 video_size / 1024.0,
1407 audio_size / 1024.0,
1408 subtitle_size / 1024.0,
1409 other_size / 1024.0,
1410 extra_size / 1024.0);
1412 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1414 av_log(NULL, AV_LOG_INFO, "unknown");
1415 av_log(NULL, AV_LOG_INFO, "\n");
1417 /* print verbose per-stream stats */
1418 for (i = 0; i < nb_input_files; i++) {
1419 InputFile *f = input_files[i];
/* note: this shadows the total_size parameter for per-file accounting */
1420 uint64_t total_packets = 0, total_size = 0;
1422 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1423 i, f->ctx->filename);
1425 for (j = 0; j < f->nb_streams; j++) {
1426 InputStream *ist = input_streams[f->ist_index + j];
1427 enum AVMediaType type = ist->dec_ctx->codec_type;
1429 total_size += ist->data_size;
1430 total_packets += ist->nb_packets;
1432 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1433 i, j, media_type_string(type));
1434 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1435 ist->nb_packets, ist->data_size);
1437 if (ist->decoding_needed) {
1438 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1439 ist->frames_decoded);
1440 if (type == AVMEDIA_TYPE_AUDIO)
1441 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1442 av_log(NULL, AV_LOG_VERBOSE, "; ");
1445 av_log(NULL, AV_LOG_VERBOSE, "\n");
1448 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1449 total_packets, total_size);
1452 for (i = 0; i < nb_output_files; i++) {
1453 OutputFile *of = output_files[i];
1454 uint64_t total_packets = 0, total_size = 0;
1456 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1457 i, of->ctx->filename);
1459 for (j = 0; j < of->ctx->nb_streams; j++) {
1460 OutputStream *ost = output_streams[of->ost_index + j];
1461 enum AVMediaType type = ost->enc_ctx->codec_type;
1463 total_size += ost->data_size;
1464 total_packets += ost->packets_written;
1466 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1467 i, j, media_type_string(type));
1468 if (ost->encoding_needed) {
1469 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1470 ost->frames_encoded);
1471 if (type == AVMEDIA_TYPE_AUDIO)
1472 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1473 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1477 ost->packets_written, ost->data_size);
1479 av_log(NULL, AV_LOG_VERBOSE, "\n");
1482 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1483 total_packets, total_size);
/* warn loudly when nothing at all was encoded */
1485 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1486 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1488 av_log(NULL, AV_LOG_WARNING, "\n");
1490 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (or final) progress line to stderr and, when -progress is
 * active, a key=value report to progress_avio. Rate-limited to one report per
 * 500ms via the function-local static last_time. On the last report this also
 * calls print_final_stats(). */
1495 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1498 AVBPrint buf_script;
1500 AVFormatContext *oc;
1502 AVCodecContext *enc;
1503 int frame_number, vid, i;
1506 int64_t pts = INT64_MIN + 1;
/* static state: persists across calls for the 500ms rate limit and the
 * quantizer histogram printed by -qphist */
1507 static int64_t last_time = -1;
1508 static int qp_histogram[52];
1509 int hours, mins, secs, us;
1513 if (!print_stats && !is_last_report && !progress_avio)
1516 if (!is_last_report) {
1517 if (last_time == -1) {
1518 last_time = cur_time;
/* throttle: skip reports closer than 500ms apart */
1521 if ((cur_time - last_time) < 500000)
1523 last_time = cur_time;
1526 t = (cur_time-timer_start) / 1000000.0;
/* size/bitrate are reported for the first output file only */
1529 oc = output_files[0]->ctx;
1531 total_size = avio_size(oc->pb);
1532 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1533 total_size = avio_tell(oc->pb);
1537 av_bprint_init(&buf_script, 0, 1);
1538 for (i = 0; i < nb_output_streams; i++) {
1540 ost = output_streams[i];
1542 if (!ost->stream_copy)
1543 q = ost->quality / (float) FF_QP2LAMBDA;
/* a second video stream only contributes its quantizer to the line */
1545 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1546 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1547 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1548 ost->file_index, ost->index, q);
/* first video stream: frame count, fps, quantizer */
1550 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1553 frame_number = ost->frame_number;
1554 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" selects one decimal place for low frame rates */
1555 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1556 frame_number, fps < 9.95, fps, q);
1557 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1558 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1559 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560 ost->file_index, ost->index, q);
1562 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-compressed histogram of quantizers seen so far */
1566 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1568 for (j = 0; j < 32; j++)
1569 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1572 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1574 double error, error_sum = 0;
1575 double scale, scale_sum = 0;
1577 char type[3] = { 'Y','U','V' };
1578 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1579 for (j = 0; j < 3; j++) {
/* final report uses the encoder's accumulated error over all frames;
 * periodic reports use the per-frame error saved on the stream */
1580 if (is_last_report) {
1581 error = enc->error[j];
1582 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1584 error = ost->error[j];
1585 scale = enc->width * enc->height * 255.0 * 255.0;
1591 p = psnr(error / scale);
1592 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* "| 32" lowercases the plane letter for the machine-readable key */
1593 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1594 ost->file_index, ost->index, type[j] | 32, p);
1596 p = psnr(error_sum / scale_sum);
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1598 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1599 ost->file_index, ost->index, p);
1603 /* compute min output value */
1604 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1605 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1606 ost->st->time_base, AV_TIME_BASE_Q));
1608 nb_frames_drop += ost->last_dropped;
/* split the microsecond pts into H:M:S.xx components */
1611 secs = FFABS(pts) / AV_TIME_BASE;
1612 us = FFABS(pts) % AV_TIME_BASE;
1618 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1619 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1621 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1623 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1624 "size=%8.0fkB time=", total_size / 1024.0);
1626 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1627 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1629 (100 * us) / AV_TIME_BASE);
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1633 av_bprintf(&buf_script, "bitrate=N/A\n");
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1636 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1639 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1640 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1641 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1642 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1643 hours, mins, secs, us);
1645 if (nb_frames_dup || nb_frames_drop)
1646 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1647 nb_frames_dup, nb_frames_drop);
1648 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1649 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1652 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1653 av_bprintf(&buf_script, "speed=N/A\n");
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1656 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1659 if (print_stats || is_last_report) {
/* '\r' keeps updating the same terminal line; '\n' only on the last one */
1660 const char end = is_last_report ? '\n' : '\r';
1661 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1662 fprintf(stderr, "%s %c", buf, end);
1664 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1669 if (progress_avio) {
1670 av_bprintf(&buf_script, "progress=%s\n",
1671 is_last_report ? "end" : "continue");
1672 avio_write(progress_avio, buf_script.str,
1673 FFMIN(buf_script.len, buf_script.size - 1));
1674 avio_flush(progress_avio);
1675 av_bprint_finalize(&buf_script, NULL);
1676 if (is_last_report) {
1677 if ((ret = avio_closep(&progress_avio)) < 0)
1678 av_log(NULL, AV_LOG_ERROR,
1679 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1684 print_final_stats(total_size);
/* Drain every active encoder at end of stream: repeatedly call the encode
 * function with a NULL frame until it stops producing packets, writing each
 * flushed packet to its muxer. Streams that are copy-only, or raw-picture
 * video outputs, are skipped. */
1687 static void flush_encoders(void)
1691 for (i = 0; i < nb_output_streams; i++) {
1692 OutputStream *ost = output_streams[i];
1693 AVCodecContext *enc = ost->enc_ctx;
1694 AVFormatContext *os = output_files[ost->file_index]->ctx;
1695 int stop_encoding = 0;
1697 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders (frame_size <= 1) buffer nothing to flush */
1700 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1702 #if FF_API_LAVF_FMT_RAWPICTURE
1703 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* select the media-specific encode entry point for the flush loop */
1708 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1711 switch (enc->codec_type) {
1712 case AVMEDIA_TYPE_AUDIO:
1713 encode = avcodec_encode_audio2;
1716 case AVMEDIA_TYPE_VIDEO:
1717 encode = avcodec_encode_video2;
1728 av_init_packet(&pkt);
1732 update_benchmark(NULL);
/* NULL frame signals the encoder to emit any buffered packets */
1733 ret = encode(enc, &pkt, NULL, &got_packet);
1734 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1736 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* keep feeding two-pass log data while flushing */
1741 if (ost->logfile && enc->stats_out) {
1742 fprintf(ost->logfile, "%s", enc->stats_out);
1748 if (ost->finished & MUXER_FINISHED) {
1749 av_packet_unref(&pkt);
1752 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1753 pkt_size = pkt.size;
1754 write_frame(os, &pkt, ost);
1755 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1756 do_video_stats(ost, pkt_size);
1767 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when ost is fed by this exact input stream and the output
 * file's -ss start time has been reached; the early-reject paths' return
 * statements are not visible in this listing. */
1769 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1771 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, compared to ost's source */
1772 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1774 if (ost->source_index != ist_index)
1780 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale timestamps into the output stream timebase, honor -ss/-t trimming,
 * optionally run the packet through av_parser_change for certain codecs, and
 * hand the result to write_frame(). */
1786 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1788 OutputFile *of = output_files[ost->file_index];
1789 InputFile *f = input_files [ist->file_index];
1790 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1791 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 av_init_packet(&opkt);
/* by default, drop leading non-keyframes so the output starts decodable */
1797 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1798 !ost->copy_initial_nonkeyframes)
/* drop packets that lie before the requested start point, unless
 * -copypriorss asked to keep them */
1801 if (!ost->frame_number && !ost->copy_prior_start) {
1802 int64_t comp_start = start_time;
1803 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1804 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1805 if (pkt->pts == AV_NOPTS_VALUE ?
1806 ist->pts < comp_start :
1807 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output file's -t duration is exceeded */
1811 if (of->recording_time != INT64_MAX &&
1812 ist->pts >= of->recording_time + start_time) {
1813 close_output_stream(ost);
/* likewise for a per-input-file recording limit */
1817 if (f->recording_time != INT64_MAX) {
1818 start_time = f->ctx->start_time;
1819 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1820 start_time += f->start_time;
1821 if (ist->pts >= f->recording_time + start_time) {
1822 close_output_stream(ost);
1827 /* force the input stream PTS */
1828 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1831 if (pkt->pts != AV_NOPTS_VALUE)
1832 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1834 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet carries none */
1836 if (pkt->dts == AV_NOPTS_VALUE)
1837 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1839 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1840 opkt.dts -= ost_tb_start_time;
/* audio: rescale via sample counts to avoid timestamp rounding drift */
1842 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1843 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1845 duration = ist->dec_ctx->frame_size;
1846 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1847 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1848 ost->st->time_base) - ost_tb_start_time;
1851 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1852 opkt.flags = pkt->flags;
1853 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1854 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1855 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1856 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1857 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1859 int ret = av_parser_change(ost->parser, ost->st->codec,
1860 &opkt.data, &opkt.size,
1861 pkt->data, pkt->size,
1862 pkt->flags & AV_PKT_FLAG_KEY);
1864 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser allocated a new payload: give the packet ownership of it */
1869 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1874 opkt.data = pkt->data;
1875 opkt.size = pkt->size;
1877 av_copy_packet_side_data(&opkt, pkt);
1879 #if FF_API_LAVF_FMT_RAWPICTURE
1880 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1881 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1882 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1883 /* store AVPicture in AVPacket, as expected by the output format */
1884 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1886 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1890 opkt.data = (uint8_t *)&pict;
1891 opkt.size = sizeof(AVPicture);
1892 opkt.flags |= AV_PKT_FLAG_KEY;
1896 write_frame(of->ctx, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn about the guess.
 * The return statements (success/failure) are not visible in this listing. */
1899 int guess_input_channel_layout(InputStream *ist)
1901 AVCodecContext *dec = ist->dec_ctx;
1903 if (!dec->channel_layout) {
1904 char layout_name[256];
/* respect the user's cap on how many channels may be guessed */
1906 if (dec->channels > ist->guess_layout_max)
1908 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1909 if (!dec->channel_layout)
1911 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1912 dec->channels, dec->channel_layout);
1913 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1914 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Bookkeeping after a decode call: count successes vs. failures in
 * decode_error_stat[], and under -xerror abort on a decode error or on a
 * frame the decoder flagged as corrupt. ist may be NULL (subtitle path). */
1919 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* index 0 = successful decodes, index 1 = failed decodes */
1921 if (*got_output || ret<0)
1922 decode_error_stat[ret<0] ++;
1924 if (ret < 0 && exit_on_error)
1927 if (exit_on_error && *got_output && ist) {
1928 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1929 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, derive a best-guess pts for the decoded frame,
 * reconfigure filtergraphs if the sample format/rate/channel layout changed
 * mid-stream, then push the frame into every attached filter input.
 * Returns the decoder's byte count on success, <0 on error. */
1935 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1937 AVFrame *decoded_frame, *f;
1938 AVCodecContext *avctx = ist->dec_ctx;
1939 int i, ret, err = 0, resample_changed;
1940 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames on first use */
1942 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1943 return AVERROR(ENOMEM);
1944 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1945 return AVERROR(ENOMEM);
1946 decoded_frame = ist->decoded_frame;
1948 update_benchmark(NULL);
1949 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1950 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1952 if (ret >= 0 && avctx->sample_rate <= 0) {
1953 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1954 ret = AVERROR_INVALIDDATA;
1957 check_decode_result(ist, got_output, ret);
1959 if (!*got_output || ret < 0)
1962 ist->samples_decoded += decoded_frame->nb_samples;
1963 ist->frames_decoded++;
1966 /* increment next_dts to use for the case where the input stream does not
1967 have timestamps or there are multiple frames in the packet */
1968 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1970 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect a mid-stream change of audio parameters relative to what the
 * filtergraph was configured with */
1974 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1975 ist->resample_channels != avctx->channels ||
1976 ist->resample_channel_layout != decoded_frame->channel_layout ||
1977 ist->resample_sample_rate != decoded_frame->sample_rate;
1978 if (resample_changed) {
1979 char layout1[64], layout2[64];
1981 if (!guess_input_channel_layout(ist)) {
1982 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1983 "layout for Input Stream #%d.%d\n", ist->file_index,
1987 decoded_frame->channel_layout = avctx->channel_layout;
1989 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1990 ist->resample_channel_layout);
1991 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1992 decoded_frame->channel_layout);
1994 av_log(NULL, AV_LOG_INFO,
1995 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1996 ist->file_index, ist->st->index,
1997 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1998 ist->resample_channels, layout1,
1999 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2000 avctx->channels, layout2);
/* remember the new parameters, then rebuild every graph this stream feeds */
2002 ist->resample_sample_fmt = decoded_frame->format;
2003 ist->resample_sample_rate = decoded_frame->sample_rate;
2004 ist->resample_channel_layout = decoded_frame->channel_layout;
2005 ist->resample_channels = avctx->channels;
2007 for (i = 0; i < nb_filtergraphs; i++)
2008 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2009 FilterGraph *fg = filtergraphs[i];
2010 if (configure_filtergraph(fg) < 0) {
2011 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2017 /* if the decoder provides a pts, use it instead of the last packet pts.
2018 the decoder could be delaying output by a packet or more. */
2019 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2020 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2021 decoded_frame_tb = avctx->time_base;
2022 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2023 decoded_frame->pts = decoded_frame->pkt_pts;
2024 decoded_frame_tb = ist->st->time_base;
2025 } else if (pkt->pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = pkt->pts;
2027 decoded_frame_tb = ist->st->time_base;
2029 decoded_frame->pts = ist->dts;
2030 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so it is not reused for a later frame */
2032 pkt->pts = AV_NOPTS_VALUE;
/* rescale into sample units, carrying rounding error across frames */
2033 if (decoded_frame->pts != AV_NOPTS_VALUE)
2034 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2035 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2036 (AVRational){1, avctx->sample_rate});
2037 ist->nb_samples = decoded_frame->nb_samples;
2038 for (i = 0; i < ist->nb_filters; i++) {
/* all but the last filter input get a reference; the last consumes the
 * decoded frame directly, saving one ref */
2039 if (i < ist->nb_filters - 1) {
2040 f = ist->filter_frame;
2041 err = av_frame_ref(f, decoded_frame);
2046 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2047 AV_BUFFERSRC_FLAG_PUSH);
2048 if (err == AVERROR_EOF)
2049 err = 0; /* ignore */
2053 decoded_frame->pts = AV_NOPTS_VALUE;
2055 av_frame_unref(ist->filter_frame);
2056 av_frame_unref(decoded_frame);
2057 return err < 0 ? err : ret;
/* Decode one video packet, fix up has_b_frames and timestamps (using the
 * frame's best-effort timestamp), retrieve hwaccel data if needed,
 * reconfigure filtergraphs on a mid-stream size/pixel-format change, and
 * push the decoded frame into every attached filter input. */
2060 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2062 AVFrame *decoded_frame, *f;
2063 int i, ret = 0, err = 0, resample_changed;
2064 int64_t best_effort_timestamp;
2065 AVRational *frame_sample_aspect;
/* lazily allocate the reusable decode/filter frames on first use */
2067 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 decoded_frame = ist->decoded_frame;
/* feed the tracked dts to the decoder so frame timestamps stay coherent */
2072 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2074 update_benchmark(NULL);
2075 ret = avcodec_decode_video2(ist->dec_ctx,
2076 decoded_frame, got_output, pkt);
2077 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2079 // The following line may be required in some cases where there is no parser
2080 // or the parser does not has_b_frames correctly
2081 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2082 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2083 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2085 av_log(ist->dec_ctx, AV_LOG_WARNING,
2086 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2087 "If you want to help, upload a sample "
2088 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2089 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2090 ist->dec_ctx->has_b_frames,
2091 ist->st->codec->has_b_frames);
2094 check_decode_result(ist, got_output, ret);
/* debug aid: report frames whose geometry disagrees with the context */
2096 if (*got_output && ret >= 0) {
2097 if (ist->dec_ctx->width != decoded_frame->width ||
2098 ist->dec_ctx->height != decoded_frame->height ||
2099 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2100 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2101 decoded_frame->width,
2102 decoded_frame->height,
2103 decoded_frame->format,
2104 ist->dec_ctx->width,
2105 ist->dec_ctx->height,
2106 ist->dec_ctx->pix_fmt);
2110 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top) */
2113 if(ist->top_field_first>=0)
2114 decoded_frame->top_field_first = ist->top_field_first;
2116 ist->frames_decoded++;
/* copy hwaccel surface data back to system memory when required */
2118 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2119 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2123 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2125 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2126 if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* note the embedded assignment: the frame pts is also set here */
2127 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2129 if (ts != AV_NOPTS_VALUE)
2130 ist->next_pts = ist->pts = ts;
2134 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2135 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2136 ist->st->index, av_ts2str(decoded_frame->pts),
2137 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2138 best_effort_timestamp,
2139 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2140 decoded_frame->key_frame, decoded_frame->pict_type,
2141 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides whatever the decoder produced */
2146 if (ist->st->sample_aspect_ratio.num)
2147 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect a mid-stream change of frame geometry or pixel format */
2149 resample_changed = ist->resample_width != decoded_frame->width ||
2150 ist->resample_height != decoded_frame->height ||
2151 ist->resample_pix_fmt != decoded_frame->format;
2152 if (resample_changed) {
2153 av_log(NULL, AV_LOG_INFO,
2154 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2155 ist->file_index, ist->st->index,
2156 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2157 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2159 ist->resample_width = decoded_frame->width;
2160 ist->resample_height = decoded_frame->height;
2161 ist->resample_pix_fmt = decoded_frame->format;
2163 for (i = 0; i < nb_filtergraphs; i++) {
2164 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2165 configure_filtergraph(filtergraphs[i]) < 0) {
2166 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2172 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2173 for (i = 0; i < ist->nb_filters; i++) {
2174 if (!frame_sample_aspect->num)
2175 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* all but the last filter input get a reference; the last consumes the
 * decoded frame directly */
2177 if (i < ist->nb_filters - 1) {
2178 f = ist->filter_frame;
2179 err = av_frame_ref(f, decoded_frame);
2184 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2185 if (ret == AVERROR_EOF) {
2186 ret = 0; /* ignore */
2187 } else if (ret < 0) {
2188 av_log(NULL, AV_LOG_FATAL,
2189 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2195 av_frame_unref(ist->filter_frame);
2196 av_frame_unref(decoded_frame);
2197 return err < 0 ? err : ret;
/* Decode one subtitle packet and send the result to every matching subtitle
 * encoder (and to sub2video). With -fix_sub_duration the function delays
 * output by one subtitle so the previous one's end time can be clipped to
 * the start of the current one. */
2200 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2202 AVSubtitle subtitle;
2203 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2204 &subtitle, got_output, pkt);
2206 check_decode_result(NULL, got_output, ret);
2208 if (ret < 0 || !*got_output) {
2210 sub2video_flush(ist);
2214 if (ist->fix_sub_duration) {
2216 if (ist->prev_sub.got_output) {
/* clip the previous subtitle's duration (in ms) so it ends no later
 * than this one begins */
2217 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2218 1000, AV_TIME_BASE);
2219 if (end < ist->prev_sub.subtitle.end_display_time) {
2220 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2221 "Subtitle duration reduced from %d to %d%s\n",
2222 ist->prev_sub.subtitle.end_display_time, end,
2223 end <= 0 ? ", dropping it" : "");
2224 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous: emit the (now clipped) previous subtitle
 * this call, hold the current one for the next call */
2227 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2228 FFSWAP(int, ret, ist->prev_sub.ret);
2229 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2237 sub2video_update(ist, &subtitle);
2239 if (!subtitle.num_rects)
2242 ist->frames_decoded++;
2244 for (i = 0; i < nb_output_streams; i++) {
2245 OutputStream *ost = output_streams[i];
2247 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2248 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2251 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2255 avsubtitle_free(&subtitle);
/* Signal EOF (a NULL frame) to every buffersrc filter fed by this input
 * stream; the error check on ret and the return statement are not visible
 * in this listing. */
2259 static int send_filter_eof(InputStream *ist)
2262 for (i = 0; i < ist->nb_filters; i++) {
2263 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2271 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2277 if (!ist->saw_first_ts) {
2278 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2280 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2281 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2282 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2284 ist->saw_first_ts = 1;
2287 if (ist->next_dts == AV_NOPTS_VALUE)
2288 ist->next_dts = ist->dts;
2289 if (ist->next_pts == AV_NOPTS_VALUE)
2290 ist->next_pts = ist->pts;
2294 av_init_packet(&avpkt);
2302 if (pkt->dts != AV_NOPTS_VALUE) {
2303 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2304 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2305 ist->next_pts = ist->pts = ist->dts;
2308 // while we have more to decode or while the decoder did output something on EOF
2309 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2313 ist->pts = ist->next_pts;
2314 ist->dts = ist->next_dts;
2316 if (avpkt.size && avpkt.size != pkt->size &&
2317 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2318 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2319 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2320 ist->showed_multi_packet_warning = 1;
2323 switch (ist->dec_ctx->codec_type) {
2324 case AVMEDIA_TYPE_AUDIO:
2325 ret = decode_audio (ist, &avpkt, &got_output);
2327 case AVMEDIA_TYPE_VIDEO:
2328 ret = decode_video (ist, &avpkt, &got_output);
2329 if (avpkt.duration) {
2330 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2331 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2332 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2333 duration = ((int64_t)AV_TIME_BASE *
2334 ist->dec_ctx->framerate.den * ticks) /
2335 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2339 if(ist->dts != AV_NOPTS_VALUE && duration) {
2340 ist->next_dts += duration;
2342 ist->next_dts = AV_NOPTS_VALUE;
2345 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2347 case AVMEDIA_TYPE_SUBTITLE:
2348 ret = transcode_subtitles(ist, &avpkt, &got_output);
2355 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2356 ist->file_index, ist->st->index, av_err2str(ret));
2363 avpkt.pts= AV_NOPTS_VALUE;
2365 // touch data and size only if not EOF
2367 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2375 if (got_output && !pkt)
2379 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2380 /* except when looping we need to flush but not to send an EOF */
2381 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2382 int ret = send_filter_eof(ist);
2384 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2389 /* handle stream copy */
2390 if (!ist->decoding_needed) {
2391 ist->dts = ist->next_dts;
2392 switch (ist->dec_ctx->codec_type) {
2393 case AVMEDIA_TYPE_AUDIO:
2394 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2395 ist->dec_ctx->sample_rate;
2397 case AVMEDIA_TYPE_VIDEO:
2398 if (ist->framerate.num) {
2399 // TODO: Remove work-around for c99-to-c89 issue 7
2400 AVRational time_base_q = AV_TIME_BASE_Q;
2401 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2402 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2403 } else if (pkt->duration) {
2404 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2405 } else if(ist->dec_ctx->framerate.num != 0) {
2406 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2407 ist->next_dts += ((int64_t)AV_TIME_BASE *
2408 ist->dec_ctx->framerate.den * ticks) /
2409 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2413 ist->pts = ist->dts;
2414 ist->next_pts = ist->next_dts;
2416 for (i = 0; pkt && i < nb_output_streams; i++) {
2417 OutputStream *ost = output_streams[i];
2419 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2422 do_streamcopy(ist, ost, pkt);
/*
 * Build an SDP description covering every output file that uses the "rtp"
 * muxer and emit it — to stdout when no -sdp_file was given, otherwise to
 * the file named by sdp_filename via avio.
 * NOTE(review): many interior lines (error checks, loop closers, frees) are
 * elided in this extraction; the visible lines are fragments.
 */
2428 static void print_sdp(void)
2433 AVIOContext *sdp_pb;
2434 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only RTP muxer contexts; j counts how many were found. */
2438 for (i = 0, j = 0; i < nb_output_files; i++) {
2439 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2440 avc[j] = output_files[i]->ctx;
/* Generate the SDP text for the j collected RTP contexts into sdp[]. */
2448 av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No target file configured: print to stdout. */
2450 if (!sdp_filename) {
2451 printf("SDP:\n%s\n", sdp);
/* Otherwise write it to sdp_filename, honoring the interrupt callback. */
2454 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2455 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2457 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2458 avio_closep(&sdp_pb);
2459 av_freep(&sdp_filename);
/*
 * Look up the entry of the global hwaccels[] table whose pixel format
 * matches pix_fmt. The table is terminated by an entry with a NULL name.
 * NOTE(review): the no-match return path is elided from this view.
 */
2467 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2470 for (i = 0; hwaccels[i].name; i++)
2471 if (hwaccels[i].pix_fmt == pix_fmt)
2472 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's list of candidate
 * pixel formats and pick the first hwaccel format whose HWAccel entry
 * matches the hwaccel requested/active on this input stream, initializing
 * the hwaccel for the codec context. The InputStream is recovered from
 * s->opaque (set in init_input_stream).
 * NOTE(review): `continue` statements, the software-format fallback and the
 * final return are elided in this extraction.
 */
2476 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2478 InputStream *ist = s->opaque;
2479 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2482 for (p = pix_fmts; *p != -1; p++) {
2483 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2484 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are of interest here. */
2486 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2489 hwaccel = get_hwaccel(*p);
/* Skip formats whose hwaccel does not match what the user selected
 * or what is already active on this stream. */
2491 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2492 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2495 ret = hwaccel->init(s);
/* If the user explicitly requested this hwaccel, a failed init is fatal. */
2497 if (ist->hwaccel_id == hwaccel->id) {
2498 av_log(NULL, AV_LOG_FATAL,
2499 "%s hwaccel requested for input stream #%d:%d, "
2500 "but cannot be initialized.\n", hwaccel->name,
2501 ist->file_index, ist->st->index);
2502 return AV_PIX_FMT_NONE;
/* Record the chosen hwaccel and its pixel format on the stream. */
2506 ist->active_hwaccel_id = hwaccel->id;
2507 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses the hwaccel pixel format, otherwise
 * fall back to the default allocator.
 */
2514 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2516 InputStream *ist = s->opaque;
2518 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2519 return ist->hwaccel_get_buffer(s, frame, flags);
2521 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream ist_index when decoding is needed:
 * install the hwaccel-aware get_format/get_buffer2 callbacks, apply default
 * decoder options (refcounted frames, auto threads, DVB-subtitle EDT), and
 * call avcodec_open2(). On failure a human-readable message is written into
 * error[error_len] and a negative AVERROR code is returned.
 * NOTE(review): several closing braces / return statements are elided in
 * this extraction.
 */
2524 static int init_input_stream(int ist_index, char *error, int error_len)
2527 InputStream *ist = input_streams[ist_index];
2529 if (ist->decoding_needed) {
2530 AVCodec *codec = ist->dec;
/* No decoder was found for this stream's codec: report and bail out. */
2532 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2533 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2534 return AVERROR(EINVAL);
/* Make the InputStream reachable from the codec context so the
 * get_format/get_buffer callbacks can find their stream state. */
2537 ist->dec_ctx->opaque = ist;
2538 ist->dec_ctx->get_format = get_format;
2539 ist->dec_ctx->get_buffer2 = get_buffer;
2540 ist->dec_ctx->thread_safe_callbacks = 1;
2542 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles that feed an output stream get end-display-time
 * computation enabled (unless the user already set it). */
2543 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2544 (ist->decoding_needed & DECODING_FOR_OST)) {
2545 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2546 if (ist->decoding_needed & DECODING_FOR_FILTER)
2547 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread-count selection if the user gave none. */
2550 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2551 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2552 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2553 if (ret == AVERROR_EXPERIMENTAL)
2554 abort_codec_experimental(codec, 0);
2556 snprintf(error, error_len,
2557 "Error while opening decoder for input stream "
2559 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are unknown — abort loudly. */
2562 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is seen. */
2565 ist->next_pts = AV_NOPTS_VALUE;
2566 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, or (per the elided
 * fall-through, presumably NULL) when the output has no direct source
 * stream (e.g. it is fed by a complex filtergraph).
 */
2571 static InputStream *get_input_stream(OutputStream *ost)
2573 if (ost->source_index >= 0)
2574 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: returns the sign of (*a - *b)
 * without risking overflow, via FFDIFFSIGN.
 */
2578 static int compare_int64(const void *a, const void *b)
2580 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/*
 * Open the encoder for an output stream (when encoding is needed): inherit
 * the subtitle header from the source decoder, apply default encoder
 * options, open the codec, propagate coded side data to the AVStream and
 * set the stream timebase. For stream copy, only options and timebase are
 * applied. On failure writes a message into error[error_len] and returns a
 * negative AVERROR code.
 * NOTE(review): many closing braces, `goto`/`return` paths and the
 * stream-copy branch header are elided in this extraction.
 */
2583 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2587 if (ost->encoding_needed) {
2588 AVCodec *codec = ost->enc;
2589 AVCodecContext *dec = NULL;
2592 if ((ist = get_input_stream(ost)))
/* Carry the decoder's ASS subtitle header over to the encoder. */
2594 if (dec && dec->subtitle_header) {
2595 /* ASS code assumes this buffer is null terminated so add extra byte. */
2596 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2597 if (!ost->enc_ctx->subtitle_header)
2598 return AVERROR(ENOMEM);
2599 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2600 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread count unless the user chose one. */
2602 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2603 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Audio with no explicit bitrate gets a 128 kb/s default. */
2604 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2606 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2607 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2608 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2610 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2611 if (ret == AVERROR_EXPERIMENTAL)
2612 abort_codec_experimental(codec, 1);
2613 snprintf(error, error_len,
2614 "Error while opening encoder for output stream #%d:%d - "
2615 "maybe incorrect parameters such as bit_rate, rate, width or height",
2616 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders dictate the buffersink frame size. */
2619 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2620 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2621 av_buffersink_set_frame_size(ost->filter->filter,
2622 ost->enc_ctx->frame_size);
2623 assert_avoptions(ost->encoder_opts);
2624 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2625 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2626 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the encoder context into the (deprecated) AVStream codec ctx. */
2628 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2630 av_log(NULL, AV_LOG_FATAL,
2631 "Error initializing the output stream codec context.\n");
/* Deep-copy any coded side data the encoder produced onto the stream. */
2635 if (ost->enc_ctx->nb_coded_side_data) {
2638 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2639 sizeof(*ost->st->side_data));
2640 if (!ost->st->side_data)
2641 return AVERROR(ENOMEM);
2643 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2644 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2645 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2647 sd_dst->data = av_malloc(sd_src->size);
2649 return AVERROR(ENOMEM);
2650 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2651 sd_dst->size = sd_src->size;
2652 sd_dst->type = sd_src->type;
2653 ost->st->nb_side_data++;
2657 // copy timebase while removing common factors
2658 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2659 ost->st->codec->codec= ost->enc_ctx->codec;
/* Non-encoding (stream copy) path: apply user options to the context. */
2661 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2663 av_log(NULL, AV_LOG_FATAL,
2664 "Error setting up codec context options.\n");
2667 // copy timebase while removing common factors
2668 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * Parse the -force_key_frames specification `kf` — a comma-separated list
 * of timestamps, where an entry may also be "chapters[+offset]" to force a
 * keyframe at every chapter start of the output file — into a sorted array
 * of PTS values (in avctx->time_base units) stored on the OutputStream
 * (forced_kf_pts / forced_kf_count). Fatal log + exit on allocation
 * failure (the exit call itself is elided here).
 */
2674 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2675 AVCodecContext *avctx)
2678 int n = 1, i, size, index = 0;
/* First pass (elided body): count commas to size the pts array. */
2681 for (p = kf; *p; p++)
2685 pts = av_malloc_array(size, sizeof(*pts));
2687 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: split on ',' and convert each entry. */
2692 for (i = 0; i < n; i++) {
2693 char *next = strchr(p, ',');
/* "chapters" entry: expand to one keyframe per output chapter,
 * optionally shifted by a time offset appended after the keyword. */
2698 if (!memcmp(p, "chapters", 8)) {
2700 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to make room for the extra chapter entries,
 * guarding against integer overflow of size. */
2703 if (avf->nb_chapters > INT_MAX - size ||
2704 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2706 av_log(NULL, AV_LOG_FATAL,
2707 "Could not allocate forced key frames array.\n");
2710 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2711 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2713 for (j = 0; j < avf->nb_chapters; j++) {
2714 AVChapter *c = avf->chapters[j];
2715 av_assert1(index < size);
2716 pts[index++] = av_rescale_q(c->start, c->time_base,
2717 avctx->time_base) + t;
/* Plain timestamp entry. */
2722 t = parse_time_or_die("force_key_frames", p, 1);
2723 av_assert1(index < size);
2724 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the keyframe times sorted so lookup during encode is cheap. */
2731 av_assert0(index == size);
2732 qsort(pts, size, sizeof(*pts), compare_int64);
2733 ost->forced_kf_count = size;
2734 ost->forced_kf_pts = pts;
/*
 * Warn (once per stream) when a packet arrives for a stream index that was
 * not present when the input file was opened. nb_streams_warn tracks the
 * highest stream index already reported, so each new stream logs only once.
 */
2737 static void report_new_stream(int input_index, AVPacket *pkt)
2739 InputFile *file = input_files[input_index];
2740 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already known/reported — nothing to do (return elided in this view). */
2742 if (pkt->stream_index < file->nb_streams_warn)
2744 av_log(file->ctx, AV_LOG_WARNING,
2745 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2746 av_get_media_type_string(st->codec->codec_type),
2747 input_index, pkt->stream_index,
2748 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2749 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on the output stream to identify the
 * encoder used ("Lavc<version> <codec>" normally, just "Lavc <codec>" in
 * bitexact mode so FATE refs stay stable). A user-provided "encoder" tag
 * is left untouched.
 */
2752 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2754 AVDictionaryEntry *e;
2756 uint8_t *encoder_string;
2757 int encoder_string_len;
2758 int format_flags = 0;
2759 int codec_flags = 0;
/* Respect an explicit encoder tag set by the user. */
2761 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the muxer-level "fflags" option string into flag bits so we
 * can detect +bitexact requested on the output file. */
2764 e = av_dict_get(of->opts, "fflags", NULL, 0);
2766 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2769 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Same for the codec-level "flags" option. */
2771 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2773 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2776 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* "<ident> <codec-name>" plus separator and NUL. */
2779 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2780 encoder_string = av_mallocz(encoder_string_len);
2781 if (!encoder_string)
/* Full version string only when neither layer is in bitexact mode. */
2784 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2785 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2787 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2788 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dict. */
2789 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2790 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to source streams, derive encoding/stream-copy parameters for
 * every output stream, open all decoders and encoders, write every output
 * file header, and print the stream mapping. Returns 0 on success or a
 * negative AVERROR code.
 * NOTE(review): this extraction is heavily elided — many braces, `continue`/
 * `break`/`goto` statements and whole else-branches are not visible here.
 */
2793 static int transcode_init(void)
2795 int ret = 0, i, j, k;
2796 AVFormatContext *oc;
2799 char error[1024] = {0};
/* Attach each complex-filtergraph output to an input stream index so
 * downstream code that keys on source_index keeps working. */
2802 for (i = 0; i < nb_filtergraphs; i++) {
2803 FilterGraph *fg = filtergraphs[i];
2804 for (j = 0; j < fg->nb_outputs; j++) {
2805 OutputFilter *ofilter = fg->outputs[j];
2806 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2808 if (fg->nb_inputs != 1)
2810 for (k = nb_input_streams-1; k >= 0 ; k--)
2811 if (fg->inputs[0]->ist == input_streams[k])
2813 ofilter->ost->source_index = k;
2817 /* init framerate emulation */
2818 for (i = 0; i < nb_input_files; i++) {
2819 InputFile *ifile = input_files[i];
2820 if (ifile->rate_emu)
2821 for (j = 0; j < ifile->nb_streams; j++)
2822 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2825 /* for each output stream, we compute the right encoding parameters */
2826 for (i = 0; i < nb_output_streams; i++) {
2827 AVCodecContext *enc_ctx;
2828 AVCodecContext *dec_ctx = NULL;
2829 ost = output_streams[i];
2830 oc = output_files[ost->file_index]->ctx;
2831 ist = get_input_stream(ost);
2833 if (ost->attachment_filename)
/* Stream copy parameterizes the (deprecated) AVStream codec context
 * directly; encoding uses the dedicated encoder context. */
2836 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2839 dec_ctx = ist->dec_ctx;
2841 ost->st->disposition = ist->st->disposition;
2842 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2843 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output file,
 * mark it as the default disposition. */
2845 for (j=0; j<oc->nb_streams; j++) {
2846 AVStream *st = oc->streams[j];
2847 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2850 if (j == oc->nb_streams)
2851 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2852 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: mirror decoder parameters into the output ---- */
2855 if (ost->stream_copy) {
2857 uint64_t extra_size;
2859 av_assert0(ist && !ost->filter);
2861 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2863 if (extra_size > INT_MAX) {
2864 return AVERROR(EINVAL);
2867 /* if stream_copy is selected, no need to decode or encode */
2868 enc_ctx->codec_id = dec_ctx->codec_id;
2869 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when it is valid for the output
 * container (or the container has no tag table). */
2871 if (!enc_ctx->codec_tag) {
2872 unsigned int codec_tag;
2873 if (!oc->oformat->codec_tag ||
2874 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2875 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2876 enc_ctx->codec_tag = dec_ctx->codec_tag;
2879 enc_ctx->bit_rate = dec_ctx->bit_rate;
2880 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2881 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2882 enc_ctx->field_order = dec_ctx->field_order;
2883 if (dec_ctx->extradata_size) {
2884 enc_ctx->extradata = av_mallocz(extra_size);
2885 if (!enc_ctx->extradata) {
2886 return AVERROR(ENOMEM);
2888 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2890 enc_ctx->extradata_size= dec_ctx->extradata_size;
2891 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2893 enc_ctx->time_base = ist->st->time_base;
/* Container-specific timebase fixups follow (-copytb heuristics). */
2895 * Avi is a special case here because it supports variable fps but
2896 * having the fps and timebase differe significantly adds quite some
2899 if(!strcmp(oc->oformat->name, "avi")) {
2900 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2901 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2902 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2903 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2905 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2906 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2907 enc_ctx->ticks_per_frame = 2;
2908 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2909 && av_q2d(ist->st->time_base) < 1.0/500
2911 enc_ctx->time_base = dec_ctx->time_base;
2912 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2913 enc_ctx->time_base.den *= 2;
2914 enc_ctx->ticks_per_frame = 2;
2916 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2917 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2918 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2919 && strcmp(oc->oformat->name, "f4v")
2921 if( copy_tb<0 && dec_ctx->time_base.den
2922 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2923 && av_q2d(ist->st->time_base) < 1.0/500
2925 enc_ctx->time_base = dec_ctx->time_base;
2926 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode ("tmcd") tracks keep the decoder timebase when plausible. */
2929 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2930 && dec_ctx->time_base.num < dec_ctx->time_base.den
2931 && dec_ctx->time_base.num > 0
2932 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2933 enc_ctx->time_base = dec_ctx->time_base;
2936 if (!ost->frame_rate.num)
2937 ost->frame_rate = ist->framerate;
2938 if(ost->frame_rate.num)
2939 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2941 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2942 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy input stream side data (dropping the display matrix when
 * rotation was overridden on the command line). */
2944 if (ist->st->nb_side_data) {
2945 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2946 sizeof(*ist->st->side_data));
2947 if (!ost->st->side_data)
2948 return AVERROR(ENOMEM);
2950 ost->st->nb_side_data = 0;
2951 for (j = 0; j < ist->st->nb_side_data; j++) {
2952 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2953 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2955 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2958 sd_dst->data = av_malloc(sd_src->size);
2960 return AVERROR(ENOMEM);
2961 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2962 sd_dst->size = sd_src->size;
2963 sd_dst->type = sd_src->type;
2964 ost->st->nb_side_data++;
2968 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameters copied straight from the decoder. */
2970 switch (enc_ctx->codec_type) {
2971 case AVMEDIA_TYPE_AUDIO:
2972 if (audio_volume != 256) {
2973 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2976 enc_ctx->channel_layout = dec_ctx->channel_layout;
2977 enc_ctx->sample_rate = dec_ctx->sample_rate;
2978 enc_ctx->channels = dec_ctx->channels;
2979 enc_ctx->frame_size = dec_ctx->frame_size;
2980 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2981 enc_ctx->block_align = dec_ctx->block_align;
2982 enc_ctx->initial_padding = dec_ctx->delay;
2983 enc_ctx->profile = dec_ctx->profile;
2984 #if FF_API_AUDIOENC_DELAY
2985 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC-3 block_align values from some demuxers are bogus — clear. */
2987 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2988 enc_ctx->block_align= 0;
2989 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2990 enc_ctx->block_align= 0;
2992 case AVMEDIA_TYPE_VIDEO:
2993 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2994 enc_ctx->width = dec_ctx->width;
2995 enc_ctx->height = dec_ctx->height;
2996 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2997 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2999 av_mul_q(ost->frame_aspect_ratio,
3000 (AVRational){ enc_ctx->height, enc_ctx->width });
3001 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3002 "with stream copy may produce invalid files\n");
3004 else if (ist->st->sample_aspect_ratio.num)
3005 sar = ist->st->sample_aspect_ratio;
3007 sar = dec_ctx->sample_aspect_ratio;
3008 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3009 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3010 ost->st->r_frame_rate = ist->st->r_frame_rate;
3012 case AVMEDIA_TYPE_SUBTITLE:
3013 enc_ctx->width = dec_ctx->width;
3014 enc_ctx->height = dec_ctx->height;
3016 case AVMEDIA_TYPE_UNKNOWN:
3017 case AVMEDIA_TYPE_DATA:
3018 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path: locate encoder, build filtergraph, configure ---- */
3025 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3027 /* should only happen when a default codec is not present. */
3028 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3029 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3030 ret = AVERROR(EINVAL);
3034 set_encoder_id(output_files[ost->file_index], ost);
3037 if (qsv_transcode_init(ost))
/* Audio/video encoders get a simple input->output filtergraph. */
3042 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3043 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3045 fg = init_simple_filtergraph(ist, ost);
3046 if (configure_filtergraph(fg)) {
3047 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: explicit -r, filter graph, input
 * -r, input r_frame_rate, then a warned 25 fps fallback. */
3052 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3053 if (!ost->frame_rate.num)
3054 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3055 if (ist && !ost->frame_rate.num)
3056 ost->frame_rate = ist->framerate;
3057 if (ist && !ost->frame_rate.num)
3058 ost->frame_rate = ist->st->r_frame_rate;
3059 if (ist && !ost->frame_rate.num) {
3060 ost->frame_rate = (AVRational){25, 1};
3061 av_log(NULL, AV_LOG_WARNING,
3063 "about the input framerate is available. Falling "
3064 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3065 "if you want a different framerate.\n",
3066 ost->file_index, ost->index);
3068 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3069 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3070 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3071 ost->frame_rate = ost->enc->supported_framerates[idx];
3073 // reduce frame rate for mpeg4 to be within the spec limits
3074 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3075 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3076 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Per-media-type encoder parameters taken from the buffersink input. */
3080 switch (enc_ctx->codec_type) {
3081 case AVMEDIA_TYPE_AUDIO:
3082 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3083 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3084 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3085 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3086 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3088 case AVMEDIA_TYPE_VIDEO:
3089 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3090 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3091 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3092 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3093 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3094 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3095 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE; rescale to
 * the encoder timebase now that it is known. */
3097 for (j = 0; j < ost->forced_kf_count; j++)
3098 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3100 enc_ctx->time_base);
3102 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3103 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3104 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3105 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3106 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3107 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3108 if (!strncmp(ost->enc->name, "libx264", 7) &&
3109 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3110 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3111 av_log(NULL, AV_LOG_WARNING,
3112 "No pixel format specified, %s for H.264 encoding chosen.\n"
3113 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3114 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3115 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3116 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3117 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3118 av_log(NULL, AV_LOG_WARNING,
3119 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3120 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3121 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3122 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3124 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep bits_per_raw_sample only when geometry/format are unchanged
 * (condition's first operand is elided in this extraction). */
3127 enc_ctx->width != dec_ctx->width ||
3128 enc_ctx->height != dec_ctx->height ||
3129 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3130 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames handling: expression, "source", or static list. */
3133 if (ost->forced_keyframes) {
3134 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3135 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3136 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3138 av_log(NULL, AV_LOG_ERROR,
3139 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3142 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3143 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3144 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3145 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3147 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3148 // parse it only for static kf timings
3149 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3150 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3154 case AVMEDIA_TYPE_SUBTITLE:
3155 enc_ctx->time_base = (AVRational){1, 1000};
3156 if (!enc_ctx->width) {
3157 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3158 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3161 case AVMEDIA_TYPE_DATA:
/* Parse the -disposition option value with a local AVOption table. */
3169 if (ost->disposition) {
3170 static const AVOption opts[] = {
3171 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3172 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3173 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3174 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3175 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3176 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3177 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3178 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3179 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3180 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3181 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3182 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3183 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3184 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3187 static const AVClass class = {
3189 .item_name = av_default_item_name,
3191 .version = LIBAVUTIL_VERSION_INT,
3193 const AVClass *pclass = &class;
3195 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3201 /* init input streams */
3202 for (i = 0; i < nb_input_streams; i++)
3203 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts already set up. */
3204 for (i = 0; i < nb_output_streams; i++) {
3205 ost = output_streams[i];
3206 avcodec_close(ost->enc_ctx);
3211 /* open each encoder */
3212 for (i = 0; i < nb_output_streams; i++) {
3213 ret = init_output_stream(output_streams[i], error, sizeof(error));
3218 /* discard unused programs */
3219 for (i = 0; i < nb_input_files; i++) {
3220 InputFile *ifile = input_files[i];
3221 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3222 AVProgram *p = ifile->ctx->programs[j];
3223 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually used. */
3225 for (k = 0; k < p->nb_stream_indexes; k++)
3226 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3227 discard = AVDISCARD_DEFAULT;
3230 p->discard = discard;
3234 /* open files and write file headers */
3235 for (i = 0; i < nb_output_files; i++) {
3236 oc = output_files[i]->ctx;
3237 oc->interrupt_callback = int_cb;
3238 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3239 snprintf(error, sizeof(error),
3240 "Could not write header for output file #%d "
3241 "(incorrect codec parameters ?): %s",
3242 i, av_err2str(ret));
3243 ret = AVERROR(EINVAL);
3246 // assert_avoptions(output_files[i]->opts);
3247 if (strcmp(oc->oformat->name, "rtp")) {
3253 /* dump the file output parameters - cannot be done before in case
3255 for (i = 0; i < nb_output_files; i++) {
3256 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3259 /* dump the stream mapping */
3260 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3261 for (i = 0; i < nb_input_streams; i++) {
3262 ist = input_streams[i];
3264 for (j = 0; j < ist->nb_filters; j++) {
3265 if (ist->filters[j]->graph->graph_desc) {
3266 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3267 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3268 ist->filters[j]->name);
3269 if (nb_filtergraphs > 1)
3270 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3271 av_log(NULL, AV_LOG_INFO, "\n");
3276 for (i = 0; i < nb_output_streams; i++) {
3277 ost = output_streams[i];
3279 if (ost->attachment_filename) {
3280 /* an attached file */
3281 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3282 ost->attachment_filename, ost->file_index, ost->index);
3286 if (ost->filter && ost->filter->graph->graph_desc) {
3287 /* output from a complex graph */
3288 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3289 if (nb_filtergraphs > 1)
3290 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3292 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3293 ost->index, ost->enc ? ost->enc->name : "?");
3297 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3298 input_streams[ost->source_index]->file_index,
3299 input_streams[ost->source_index]->st->index,
3302 if (ost->sync_ist != input_streams[ost->source_index])
3303 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3304 ost->sync_ist->file_index,
3305 ost->sync_ist->st->index);
3306 if (ost->stream_copy)
3307 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Transcoding: show decoder/encoder names next to codec names,
 * collapsing "native" when they coincide. */
3309 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3310 const AVCodec *out_codec = ost->enc;
3311 const char *decoder_name = "?";
3312 const char *in_codec_name = "?";
3313 const char *encoder_name = "?";
3314 const char *out_codec_name = "?";
3315 const AVCodecDescriptor *desc;
3318 decoder_name = in_codec->name;
3319 desc = avcodec_descriptor_get(in_codec->id);
3321 in_codec_name = desc->name;
3322 if (!strcmp(decoder_name, in_codec_name))
3323 decoder_name = "native";
3327 encoder_name = out_codec->name;
3328 desc = avcodec_descriptor_get(out_codec->id);
3330 out_codec_name = desc->name;
3331 if (!strcmp(encoder_name, out_codec_name))
3332 encoder_name = "native";
3335 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3336 in_codec_name, decoder_name,
3337 out_codec_name, encoder_name);
3339 av_log(NULL, AV_LOG_INFO, "\n");
3343 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3347 if (sdp_filename || want_sdp) {
3351 transcode_init_done = 1;
3356 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3357 static int need_output(void)
3361 for (i = 0; i < nb_output_streams; i++) {
3362 OutputStream *ost = output_streams[i];
3363 OutputFile *of = output_files[ost->file_index];
3364 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are done or whose file hit -fs (limit_filesize). */
3366 if (ost->finished ||
3367 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of this output file. */
3369 if (ost->frame_number >= ost->max_frames) {
3371 for (j = 0; j < of->ctx->nb_streams; j++)
3372 close_output_stream(output_streams[of->ost_index + j]);
3383 * Select the output stream to process.
3385 * @return selected output stream, or NULL if none available
3387 static OutputStream *choose_output(void)
3390 int64_t opts_min = INT64_MAX;
3391 OutputStream *ost_min = NULL;
3393 for (i = 0; i < nb_output_streams; i++) {
3394 OutputStream *ost = output_streams[i];
/* rescale the stream's current dts to a common time base so streams can
 * be compared; a stream with no dts yet sorts first (INT64_MIN) */
3395 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3396 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3398 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3399 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* keep the unfinished stream that is furthest behind; if that stream is
 * currently 'unavailable' (its input gave EAGAIN) return NULL instead,
 * so the caller can wait for more input */
3401 if (!ost->finished && opts < opts_min) {
3403 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios,
 * leaving all other terminal flags untouched. Failure of tcgetattr()
 * (e.g. stdin is not a tty) is silently ignored. */
3409 static void set_tty_echo(int on)
3413 if (tcgetattr(0, &tty) == 0) {
3414 if (on) tty.c_lflag |= ECHO;
3415 else tty.c_lflag &= ~ECHO;
3416 tcsetattr(0, TCSANOW, &tty);
/* Poll for an interactive key press and act on it (verbosity changes,
 * filter commands, debug modes, help, quit).
 * @param cur_time current time from av_gettime_relative(), microseconds
 * @return AVERROR_EXIT when the user asked to stop or a quit signal was
 *         received, 0 otherwise */
3421 static int check_keyboard_interaction(int64_t cur_time)
3424 static int64_t last_time;
3425 if (received_nb_signals)
3426 return AVERROR_EXIT;
3427 /* read_key() returns 0 on EOF */
/* rate-limit key polling to once per 100ms, and never poll while
 * running as a daemon */
3428 if(cur_time - last_time >= 100000 && !run_as_daemon){
3430 last_time = cur_time;
3434 return AVERROR_EXIT;
/* '+'/'-' raise/lower the log level, 's' toggles the QP histogram */
3435 if (key == '+') av_log_set_level(av_log_get_level()+10);
3436 if (key == '-') av_log_set_level(av_log_get_level()-10);
3437 if (key == 's') qp_hist ^= 1;
3440 do_hex_dump = do_pkt_dump = 0;
3441 } else if(do_pkt_dump){
3445 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a line of the form
 * "<target>|all <time>|-1 <command>[ <argument>]" from the terminal and
 * send it immediately ('c', first matching filter only) or queue it
 * ('C', all matching filters) on every filtergraph */
3447 if (key == 'c' || key == 'C'){
3448 char buf[4096], target[64], command[256], arg[256] = {0};
3451 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3454 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3459 fprintf(stderr, "\n");
3461 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3462 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3463 target, time, command, arg);
3464 for (i = 0; i < nb_filtergraphs; i++) {
3465 FilterGraph *fg = filtergraphs[i];
3468 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3469 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3470 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3471 } else if (key == 'c') {
3472 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3473 ret = AVERROR_PATCHWELCOME;
3475 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3477 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3482 av_log(NULL, AV_LOG_ERROR,
3483 "Parse error, at least 3 arguments were expected, "
3484 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle or prompt for a codec debug bitmask and apply it to
 * every input decoder and output encoder context */
3487 if (key == 'd' || key == 'D'){
3490 debug = input_streams[0]->st->codec->debug<<1;
3491 if(!debug) debug = 1;
3492 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3499 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3504 fprintf(stderr, "\n");
3505 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3506 fprintf(stderr,"error parsing debug value\n");
3508 for(i=0;i<nb_input_streams;i++) {
3509 input_streams[i]->st->codec->debug = debug;
3511 for(i=0;i<nb_output_streams;i++) {
3512 OutputStream *ost = output_streams[i];
3513 ost->enc_ctx->debug = debug;
3515 if(debug) av_log_set_level(AV_LOG_DEBUG);
3516 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys, presumably — the guarding condition is on an
 * elided line) prints this key help */
3519 fprintf(stderr, "key function\n"
3520 "? show this help\n"
3521 "+ increase verbosity\n"
3522 "- decrease verbosity\n"
3523 "c Send command to first matching filter supporting it\n"
3524 "C Send/Que command to all matching filters\n"
3525 "D cycle through available debug modes\n"
3526 "h dump packets/hex press to cycle through the 3 states\n"
3528 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On read
 * error/EOF the error is propagated to the queue's receive side so the
 * main thread sees it. */
3535 static void *input_thread(void *arg)
3538 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3543 ret = av_read_frame(f->ctx, &pkt);
3545 if (ret == AVERROR(EAGAIN)) {
3550 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3553 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* non-blocking send failed because the queue is full: retry (the elided
 * line presumably clears 'flags' so the retry blocks — confirm) and warn
 * that thread_queue_size may be too small */
3554 if (flags && ret == AVERROR(EAGAIN)) {
3556 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3557 av_log(f->ctx, AV_LOG_WARNING,
3558 "Thread message queue blocking; consider raising the "
3559 "thread_queue_size option (current value: %d)\n",
3560 f->thread_queue_size);
/* send failed for a real reason: drop the packet and propagate the
 * error (EOF is expected and not logged) */
3563 if (ret != AVERROR_EOF)
3564 av_log(f->ctx, AV_LOG_ERROR,
3565 "Unable to send packet to main thread: %s\n",
3567 av_packet_unref(&pkt);
3568 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap all demuxer input threads: signal EOF to the sending
 * side, drain queued packets so their buffers are released, join the
 * thread, then free the message queue. Safe to call when threads were
 * never created (queue pointer is NULL). */
3576 static void free_input_threads(void)
3580 for (i = 0; i < nb_input_files; i++) {
3581 InputFile *f = input_files[i];
3584 if (!f || !f->in_thread_queue)
3586 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain leftover packets so no buffers leak */
3587 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3588 av_packet_unref(&pkt);
3590 pthread_join(f->thread, NULL);
3592 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one reader thread (plus packet message queue) per input file.
 * Skipped entirely when there is only one input file.
 * @return 0 on success, a negative AVERROR code on failure */
3596 static int init_input_threads(void)
3600 if (nb_input_files == 1)
3603 for (i = 0; i < nb_input_files; i++) {
3604 InputFile *f = input_files[i];
/* non-seekable inputs (live sources) are read in non-blocking mode;
 * the "lavfi" pseudo-input is exempt */
3606 if (f->ctx->pb ? !f->ctx->pb->seekable :
3607 strcmp(f->ctx->iformat->name, "lavfi"))
3608 f->non_blocking = 1;
3609 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3610 f->thread_queue_size, sizeof(AVPacket));
3614 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3615 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3616 av_thread_message_queue_free(&f->in_thread_queue);
3617 return AVERROR(ret);
/* Fetch the next packet for f from its reader-thread message queue.
 * The receive is non-blocking for inputs flagged non_blocking (the
 * selecting condition sits on an elided line — confirm). */
3623 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3625 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3627 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file: via the reader thread when
 * several inputs exist, directly via av_read_frame() otherwise. The
 * leading loop implements real-time pacing (presumably guarded by a
 * rate-emulation flag on an elided line — confirm). */
3631 static int get_input_packet(InputFile *f, AVPacket *pkt)
3635 for (i = 0; i < f->nb_streams; i++) {
3636 InputStream *ist = input_streams[f->ist_index + i];
/* do not hand out data whose dts is still ahead of wall-clock time
 * elapsed since the stream started */
3637 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3638 int64_t now = av_gettime_relative() - ist->start;
3640 return AVERROR(EAGAIN);
3645 if (nb_input_files > 1)
3646 return get_input_packet_mt(f, pkt);
3648 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (its input returned EAGAIN), i.e. the last transcode step starved. */
3651 static int got_eagain(void)
3654 for (i = 0; i < nb_output_streams; i++)
3655 if (output_streams[i]->unavailable)
/* Clear the EAGAIN markers on every input file and the 'unavailable'
 * flag on every output stream before the next transcode step. */
3660 static void reset_eagain(void)
3663 for (i = 0; i < nb_input_files; i++)
3664 input_files[i]->eagain = 0;
3665 for (i = 0; i < nb_output_streams; i++)
3666 output_streams[i]->unavailable = 0;
3669 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3670 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3671 AVRational time_base)
/* no previous duration recorded: adopt tmp and its time base */
3677 return tmp_time_base;
/* compare the two durations across their differing time bases and keep
 * whichever is larger */
3680 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3683 return tmp_time_base;
/* Rewind an input file for stream looping (ifile->loop): seek back to
 * the start, flush all decoders, and fold the duration consumed by this
 * iteration into ifile->duration so the next iteration's timestamps can
 * be offset by the caller.
 * @return 0 on success, a negative AVERROR code from av_seek_frame() */
3689 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3692 AVCodecContext *avctx;
3693 int i, ret, has_audio = 0;
3694 int64_t duration = 0;
3696 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush decoders and detect whether a decodable audio
 * stream produced samples */
3700 for (i = 0; i < ifile->nb_streams; i++) {
3701 ist = input_streams[ifile->ist_index + i];
3702 avctx = ist->dec_ctx;
3705 if (ist->decoding_needed) {
3706 process_input_packet(ist, NULL, 1);
3707 avcodec_flush_buffers(avctx);
3710 /* duration is the length of the last frame in a stream
3711 * when audio stream is present we don't care about
3712 * last video frame length because it's not defined exactly */
3713 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute each stream's total duration (last-frame length
 * plus max_pts - min_pts) and keep the maximum across streams */
3717 for (i = 0; i < ifile->nb_streams; i++) {
3718 ist = input_streams[ifile->ist_index + i];
3719 avctx = ist->dec_ctx;
3722 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3723 AVRational sample_rate = {1, avctx->sample_rate};
3725 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: estimate one frame's duration from the configured or average
 * frame rate; fall back to a single time-base tick */
3729 if (ist->framerate.num) {
3730 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3731 } else if (ist->st->avg_frame_rate.num) {
3732 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3733 } else duration = 1;
3735 if (!ifile->duration)
3736 ifile->time_base = ist->st->time_base;
3737 /* the total duration of the stream, max_pts - min_pts is
3738 * the duration of the stream without the last frame */
3739 duration += ist->max_pts - ist->min_pts;
3740 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: one iteration consumed (decrement is elided) */
3744 if (ifile->loop > 0)
 * Process one packet from the given input file: read it, fix up its
 * timestamps (start-time/wrap correction, offsets, scaling,
 * discontinuity handling) and hand it to process_input_packet().
3752 * - 0 -- one packet was read and processed
3753 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3754 * this function should be called again
3755 * - AVERROR_EOF -- this function should not be called again
3757 static int process_input(int file_index)
3759 InputFile *ifile = input_files[file_index];
3760 AVFormatContext *is;
3768 ret = get_input_packet(ifile, &pkt);
3770 if (ret == AVERROR(EAGAIN)) {
/* stream looping: rewind the file and try reading again */
3774 if (ret < 0 && ifile->loop) {
3775 if ((ret = seek_to_start(ifile, is)) < 0)
3777 ret = get_input_packet(ifile, &pkt);
/* real read error (not plain EOF): report it */
3780 if (ret != AVERROR_EOF) {
3781 print_error(is->filename, ret);
/* EOF: flush every decoder of this file, then finish streamcopy and
 * subtitle outputs fed directly (not through lavfi) by those streams */
3786 for (i = 0; i < ifile->nb_streams; i++) {
3787 ist = input_streams[ifile->ist_index + i];
3788 if (ist->decoding_needed) {
3789 ret = process_input_packet(ist, NULL, 0);
3794 /* mark all outputs that don't go through lavfi as finished */
3795 for (j = 0; j < nb_output_streams; j++) {
3796 OutputStream *ost = output_streams[j];
3798 if (ost->source_index == ifile->ist_index + i &&
3799 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3800 finish_output_stream(ost);
/* report EAGAIN once so the caller re-selects; eof_reached prevents
 * further reads from this file */
3804 ifile->eof_reached = 1;
3805 return AVERROR(EAGAIN);
3811 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3812 is->streams[pkt.stream_index]);
3814 /* the following test is needed in case new streams appear
3815 dynamically in stream : we ignore them */
3816 if (pkt.stream_index >= ifile->nb_streams) {
3817 report_new_stream(file_index, &pkt);
3818 goto discard_packet;
3821 ist = input_streams[ifile->ist_index + pkt.stream_index];
3823 ist->data_size += pkt.size;
3827 goto discard_packet;
3829 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3830 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3835 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3836 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3837 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3838 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3839 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3840 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3841 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3842 av_ts2str(input_files[ist->file_index]->ts_offset),
3843 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
3846 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3847 int64_t stime, stime2;
3848 // Correcting starttime based on the enabled streams
3849 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3850 // so we instead do it here as part of discontinuity handling
3851 if ( ist->next_dts == AV_NOPTS_VALUE
3852 && ifile->ts_offset == -is->start_time
3853 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3854 int64_t new_start_time = INT64_MAX;
3855 for (i=0; i<is->nb_streams; i++) {
3856 AVStream *st = is->streams[i];
3857 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3859 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3861 if (new_start_time > is->start_time) {
3862 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3863 ifile->ts_offset = -new_start_time;
3867 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3868 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3869 ist->wrap_correction_done = 1;
/* a dts/pts more than half the wrap range past the start time has
 * wrapped: unwrap it and keep correction active for the other field */
3871 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3872 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3873 ist->wrap_correction_done = 0;
3875 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3876 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3877 ist->wrap_correction_done = 0;
3881 /* add the stream-global side data to the first packet */
3882 if (ist->nb_packets == 1) {
3883 if (ist->st->nb_side_data)
3884 av_packet_split_side_data(&pkt);
3885 for (i = 0; i < ist->st->nb_side_data; i++) {
3886 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* do not overwrite side data the packet already carries; display
 * matrices are skipped when autorotation is on */
3889 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3891 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3894 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3898 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset */
3902 if (pkt.dts != AV_NOPTS_VALUE)
3903 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3904 if (pkt.pts != AV_NOPTS_VALUE)
3905 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* apply the per-stream timestamp scale factor (ist->ts_scale) */
3907 if (pkt.pts != AV_NOPTS_VALUE)
3908 pkt.pts *= ist->ts_scale;
3909 if (pkt.dts != AV_NOPTS_VALUE)
3910 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check: on the first dts of a stream in a
 * TS_DISCONT format, compare against the file's last seen timestamp and
 * absorb large jumps into ts_offset */
3912 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3913 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3914 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3915 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3916 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3917 int64_t delta = pkt_dts - ifile->last_ts;
3918 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3919 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3920 ifile->ts_offset -= delta;
3921 av_log(NULL, AV_LOG_DEBUG,
3922 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3923 delta, ifile->ts_offset);
3924 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3925 if (pkt.pts != AV_NOPTS_VALUE)
3926 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* when looping, shift timestamps by the total duration of the previous
 * iterations and track the stream's min/max pts */
3930 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3931 if (pkt.pts != AV_NOPTS_VALUE) {
3932 pkt.pts += duration;
3933 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3934 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3937 if (pkt.dts != AV_NOPTS_VALUE)
3938 pkt.dts += duration;
/* per-stream discontinuity check against the predicted next dts:
 * TS_DISCONT formats absorb jumps into ts_offset; otherwise timestamps
 * beyond dts_error_threshold are dropped as invalid */
3940 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3941 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3942 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3943 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3945 int64_t delta = pkt_dts - ist->next_dts;
3946 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3947 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3948 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3949 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3950 ifile->ts_offset -= delta;
3951 av_log(NULL, AV_LOG_DEBUG,
3952 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3953 delta, ifile->ts_offset);
3954 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3955 if (pkt.pts != AV_NOPTS_VALUE)
3956 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3959 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3960 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3961 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3962 pkt.dts = AV_NOPTS_VALUE;
3964 if (pkt.pts != AV_NOPTS_VALUE){
3965 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3966 delta = pkt_pts - ist->next_dts;
3967 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3968 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3969 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3970 pkt.pts = AV_NOPTS_VALUE;
/* remember this file's last dts for the inter-stream check above */
3976 if (pkt.dts != AV_NOPTS_VALUE)
3977 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3980 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3981 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3982 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3983 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3984 av_ts2str(input_files[ist->file_index]->ts_offset),
3985 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* let sub2video advance to the new timestamp before decoding */
3988 sub2video_heartbeat(ist, pkt.pts);
3990 process_input_packet(ist, &pkt, 0);
3993 av_packet_unref(&pkt);
3999 * Perform a step of transcoding for the specified filter graph.
4001 * @param[in] graph filter graph to consider
4002 * @param[out] best_ist input stream where a frame would allow to continue
4003 * @return 0 for success, <0 for error
4005 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4008 int nb_requests, nb_requests_max = 0;
4009 InputFilter *ifilter;
/* ask the graph to produce output from its oldest input */
4013 ret = avfilter_graph_request_oldest(graph->graph);
4015 return reap_filters(0);
/* graph is fully flushed: reap remaining frames and close its outputs */
4017 if (ret == AVERROR_EOF) {
4018 ret = reap_filters(1);
4019 for (i = 0; i < graph->nb_outputs; i++)
4020 close_output_stream(graph->outputs[i]->ost);
4023 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs input. Pick the input stream whose buffersrc
 * reported the most failed requests, skipping files that are stalled
 * (eagain) or exhausted (eof_reached) */
4026 for (i = 0; i < graph->nb_inputs; i++) {
4027 ifilter = graph->inputs[i];
4029 if (input_files[ist->file_index]->eagain ||
4030 input_files[ist->file_index]->eof_reached)
4032 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4033 if (nb_requests > nb_requests_max) {
4034 nb_requests_max = nb_requests;
/* no usable input found: mark all this graph's outputs unavailable so
 * choose_output() moves on (condition on best_ist is elided — confirm) */
4040 for (i = 0; i < graph->nb_outputs; i++)
4041 graph->outputs[i]->ost->unavailable = 1;
4047 * Run a single step of transcoding.
4049 * @return 0 for success, <0 for error
4051 static int transcode_step(void)
/* pick the output stream that is furthest behind */
4057 ost = choose_output();
4064 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtered stream: let the filtergraph decide which input it needs */
4069 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* plain (non-filter) stream: feed it from its source input stream */
4074 av_assert0(ost->source_index >= 0);
4075 ist = input_streams[ost->source_index];
4078 ret = process_input(ist->file_index);
4079 if (ret == AVERROR(EAGAIN)) {
/* input stalled: mark this output unavailable so another is chosen */
4080 if (input_files[ist->file_index]->eagain)
4081 ost->unavailable = 1;
/* EOF from process_input is not an error at this level */
4086 return ret == AVERROR_EOF ? 0 : ret;
4088 return reap_filters(0);
4092 * The following code is the main loop of the file converter
4094 static int transcode(void)
4097 AVFormatContext *os;
4100 int64_t timer_start;
4101 int64_t total_packets_written = 0;
4103 ret = transcode_init();
4107 if (stdin_interaction) {
4108 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4111 timer_start = av_gettime_relative();
4114 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until all output is
 * produced, an error occurs, or SIGTERM is received */
4118 while (!received_sigterm) {
4119 int64_t cur_time= av_gettime_relative();
4121 /* if 'q' pressed, exits */
4122 if (stdin_interaction)
4123 if (check_keyboard_interaction(cur_time) < 0)
4126 /* check if there's any stream where output is still needed */
4127 if (!need_output()) {
4128 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4132 ret = transcode_step();
4133 if (ret < 0 && ret != AVERROR_EOF) {
4135 av_strerror(ret, errbuf, sizeof(errbuf));
4137 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4141 /* dump report by using the output first video and audio streams */
4142 print_report(0, timer_start, cur_time);
4145 free_input_threads();
4148 /* at the end of stream, we must flush the decoder buffers */
4149 for (i = 0; i < nb_input_streams; i++) {
4150 ist = input_streams[i];
4151 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4152 process_input_packet(ist, NULL, 0);
4159 /* write the trailer if needed and close file */
4160 for (i = 0; i < nb_output_files; i++) {
4161 os = output_files[i]->ctx;
4162 if ((ret = av_write_trailer(os)) < 0) {
4163 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4169 /* dump report by using the first video and audio streams */
4170 print_report(1, timer_start, av_gettime_relative());
4172 /* close each encoder */
4173 for (i = 0; i < nb_output_streams; i++) {
4174 ost = output_streams[i];
4175 if (ost->encoding_needed) {
4176 av_freep(&ost->enc_ctx->stats_in);
4178 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever written */
4181 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4182 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4186 /* close each decoder */
4187 for (i = 0; i < nb_input_streams; i++) {
4188 ist = input_streams[i];
4189 if (ist->decoding_needed) {
4190 avcodec_close(ist->dec_ctx);
4191 if (ist->hwaccel_uninit)
4192 ist->hwaccel_uninit(ist->dec_ctx);
/* cleanup path (also reached on error): stop reader threads and free
 * all per-output-stream allocations */
4201 free_input_threads();
4204 if (output_streams) {
4205 for (i = 0; i < nb_output_streams; i++) {
4206 ost = output_streams[i];
4209 if (fclose(ost->logfile))
4210 av_log(NULL, AV_LOG_ERROR,
4211 "Error closing logfile, loss of information possible: %s\n",
4212 av_err2str(AVERROR(errno)));
4213 ost->logfile = NULL;
4215 av_freep(&ost->forced_kf_pts);
4216 av_freep(&ost->apad);
4217 av_freep(&ost->disposition);
4218 av_dict_free(&ost->encoder_opts);
4219 av_dict_free(&ost->sws_dict);
4220 av_dict_free(&ost->swr_opts);
4221 av_dict_free(&ost->resample_opts);
/* Return the user CPU time consumed by this process in microseconds,
 * via getrusage() or GetProcessTimes() depending on the platform;
 * falls back to relative wall-clock time when neither is available. */
4229 static int64_t getutime(void)
4232 struct rusage rusage;
4234 getrusage(RUSAGE_SELF, &rusage);
4235 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4236 #elif HAVE_GETPROCESSTIMES
4238 FILETIME c, e, k, u;
4239 proc = GetCurrentProcess();
4240 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; combine the two 32-bit halves and divide
 * by 10 to get microseconds */
4241 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4243 return av_gettime_relative();
/* Return this process's peak memory usage in bytes: max RSS via
 * getrusage() (reported in KiB, hence the *1024), or the peak pagefile
 * usage via GetProcessMemoryInfo() on Windows; 0 when unsupported
 * (fallback branch is on an elided line — confirm). */
4247 static int64_t getmaxrss(void)
4249 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4250 struct rusage rusage;
4251 getrusage(RUSAGE_SELF, &rusage);
4252 return (int64_t)rusage.ru_maxrss * 1024;
4253 #elif HAVE_GETPROCESSMEMORYINFO
4255 PROCESS_MEMORY_COUNTERS memcounters;
4256 proc = GetCurrentProcess();
4257 memcounters.cb = sizeof(memcounters);
4258 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4259 return memcounters.PeakPagefileUsage;
/* No-op log callback: discards all libav* log messages. Installed by
 * main() when the program is started with "-d" (daemon mode). */
4265 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: configure logging and cleanup, register all
 * codecs/devices/filters/network, parse the command line, run
 * transcode(), then report benchmark and decode-error statistics. */
4269 int main(int argc, char **argv)
4274 register_exit(ffmpeg_cleanup);
4276 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4278 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4279 parse_loglevel(argc, argv, options);
/* undocumented "-d" first argument: run detached with logging disabled */
4281 if(argc>1 && !strcmp(argv[1], "-d")){
4283 av_log_set_callback(log_callback_null);
4288 avcodec_register_all();
4290 avdevice_register_all();
4292 avfilter_register_all();
4294 avformat_network_init();
4296 show_banner(argc, argv, options);
4300 /* parse options and open all input/output files */
4301 ret = ffmpeg_parse_options(argc, argv);
4305 if (nb_output_files <= 0 && nb_input_files == 0) {
4307 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4311 /* file converter / grab */
4312 if (nb_output_files <= 0) {
4313 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4317 // if (nb_input_files == 0) {
4318 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* measure user CPU time around the conversion for -benchmark */
4322 current_time = ti = getutime();
4323 if (transcode() < 0)
4325 ti = getutime() - ti;
4327 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4329 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4330 decode_error_stat[0], decode_error_stat[1]);
/* fail when the fraction of decode errors exceeds max_error_rate */
4331 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by the user/a signal rather than a result */
4334 exit_program(received_nb_signals ? 255 : main_return_code);
4335 return main_return_code;