2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity reported by cmdutils (banner, -version output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); opened lazily in do_video_stats(). */
static FILE *vstats_file;

/* Names of the constants usable in -force_key_frames expressions (initializer elided here). */
const char *const forced_keyframes_const_names[] = {

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;   /* nonzero when detached from a controlling tty */
static int nb_frames_dup = 0;   /* frames duplicated for A/V sync (reported in stats) */
static int nb_frames_drop = 0;  /* frames dropped for A/V sync (reported in stats) */
static int64_t decode_error_stat[2];  /* decode call counters -- presumably [0]=ok, [1]=errors; confirm against users */

static int current_time;              /* timestamp baseline for update_benchmark() */
AVIOContext *progress_avio = NULL;    /* destination of -progress reports */

static uint8_t *subtitle_out;         /* scratch buffer for encoded subtitle packets */

/* Global stream/file tables, shared with ffmpeg_opt.c / ffmpeg_filter.c. */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* saved tty state, restored by term_exit_sigsafe() */
static int restore_tty;         /* nonzero once oldtty holds a valid saved state */

static void free_input_threads(void);

Convert subtitles to video with alpha to insert them in filter graphs.
This is a temporary solution until libavfilter gets real subtitles support.
/**
 * Prepare a blank RGB32 canvas in ist->sub2video.frame for rendering
 * bitmap subtitles: drop the old buffer reference, size the frame and
 * clear the first plane to all-zero (fully transparent).
 * Returns <0 on allocation failure.
 */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* prefer the decoder's dimensions, fall back to the configured sub2video size */
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* zeroed RGB32 means alpha == 0, i.e. a transparent canvas */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/**
 * Blit one paletted bitmap subtitle rectangle r onto the RGB32 canvas
 * described by dst/dst_linesize (dimensions w x h).  Non-bitmap rectangles
 * and rectangles that do not fit inside the canvas are warned about and
 * skipped.
 */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
    /* advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel */
    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];
    pal = (uint32_t *)r->pict.data[1];   /* data[1] holds the RGB32 palette */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        /* expand each palette index to a full RGB32 pixel */
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->pict.linesize[0];
/**
 * Send the current sub2video canvas (stamped with pts) to every filter
 * input attached to this stream.  KEEP_REF retains our reference so the
 * same frame can be re-sent by the heartbeat later.
 */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/**
 * Render the subtitle sub onto a fresh blank canvas and push it to the
 * filters.  With sub == NULL the canvas is simply cleared (used for
 * heartbeat refreshes and end-of-stream flushing).
 */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* convert the display window (ms offsets relative to sub->pts) into
       the input stream's time base */
    pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                       AV_TIME_BASE_Q, ist->st->time_base);
    end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
    num_rects = sub->num_rects;
    /* NULL subtitle: re-send at the previously recorded end timestamp */
    pts = ist->sub2video.end_pts;

    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    /* composite every rectangle of the subtitle onto the canvas */
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/**
 * Keep sub2video streams alive while demuxing: whenever a packet is read
 * from a file, re-send (or clear) the subtitle canvas of each sub2video
 * stream in the same file so downstream filters are never starved.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* canvas expired (or never drawn): clear it before re-sending */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* only push when some filter actually failed a frame request */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
            sub2video_push_ref(ist2, pts2);
/**
 * End-of-stream handling for a sub2video stream: emit a final clearing
 * frame if the canvas is still pending, then send NULL to each buffer
 * source to signal EOF to the filter graph.
 */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe portion of terminal teardown: restore the tty state
   saved by term_init().  Callable from signal handlers. */
static void term_exit_sigsafe(void)
    tcsetattr (0, TCSANOW, &oldtty);
    /* quiet no-op log write -- flushes any pending log state on exit */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit state.  The volatile ints are written from signal handlers. */
static volatile int received_sigterm = 0;       /* last terminating signal number, 0 if none */
static volatile int received_nb_signals = 0;    /* total number of termination signals received */
static volatile int transcode_init_done = 0;    /* set once transcode setup has completed */
static volatile int ffmpeg_exited = 0;          /* set when cleanup has finished (see CtrlHandler) */
static int main_return_code = 0;                /* process exit code, set on mux errors */
/* Termination-signal handler: record the signal for the main loop, and
   hard-exit if the user keeps sending signals. */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;

    if(received_nb_signals > 3) {
        /* write() is async-signal-safe; stdio is not */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the same
   SIGINT/SIGTERM path used on POSIX systems. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {

    /* unknown event: log and fall through to the default handler */
    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    /* Put stdin into a raw-ish mode so single keypresses can be read by
       read_key(); the previous state was saved in oldtty for restoration. */
    if (tcgetattr (0, &tty) == 0) {
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);

        tcsetattr (0, TCSANOW, &tty);

    /* route all termination-style signals through sigterm_handler */
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */

    signal(SIGXCPU, sigterm_handler);

#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
static int read_key(void)
    /* POSIX path: poll stdin via select() with a zero timeout */
    n = select(1, &rfds, NULL, NULL, &tv);

# if HAVE_PEEKNAMEDPIPE
    /* Windows path: peek the console/pipe without blocking */
    static HANDLE input_handle;
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode failing means stdin is a pipe, not a console */
        is_pipe = !GetConsoleMode(input_handle, &dw);

    /* When running under a GUI, you will end here. */
    if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
        // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/**
 * Release every global resource on exit: filter graphs, output files and
 * streams, input files and streams, and miscellaneous buffers; finally
 * report whether we exited on a signal or a conversion failure.
 */
static void ffmpeg_cleanup(int ret)
        /* -benchmark: report peak resident set size in kB */
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* free each filter graph together with its input/output descriptors */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close output files: close the underlying I/O unless the muxer owns it */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];

        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;

        /* walk and close the stream's bitstream filter chain */
        bsfc = ost->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    /* stop reader threads before tearing down the inputs they feed */
    free_input_threads();

    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    av_freep(&vstats_filename);

    /* finally drop the global tables themselves */
    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
580 void remove_avoptions(AVDictionary **a, AVDictionary *b)
582 AVDictionaryEntry *t = NULL;
584 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
585 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/**
 * Fail fatally if any option remains unconsumed in m, i.e. the user
 * supplied an option that matched no component.
 */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/**
 * When -benchmark_all is active, log the CPU time elapsed since the last
 * call, labelled with the printf-style description in fmt.  Callers pass
 * fmt == NULL to (re)arm the timer without logging -- see the
 * update_benchmark(NULL) ... update_benchmark("encode_...") pairs below.
 */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
        vsnprintf(buf, sizeof(buf), fmt, va);
        av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
620 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
623 for (i = 0; i < nb_output_streams; i++) {
624 OutputStream *ost2 = output_streams[i];
625 ost2->finished |= ost == ost2 ? this_stream : others;
/**
 * Final per-packet stage before muxing: copy encoder extradata to the
 * muxer context, enforce -frames limits, collect quality stats, run
 * bitstream filters, sanitize timestamps, and hand the packet to
 * av_interleaved_write_frame().  Consumes (unrefs) pkt.
 */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* propagate encoder extradata to the (deprecated) muxing codec context */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;

    /* -vsync drop / negative async: strip timestamps entirely */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* pull quality / picture-type / error stats out of packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);

        /* derive packet duration from the forced output frame rate */
        if (ost->frame_rate.num) {
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),

    av_packet_split_side_data(pkt);

        /* run the chain of (legacy) bitstream filters on the packet */
        AVPacket new_pkt = *pkt;
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,

        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if(a == 0 && new_pkt.data != pkt->data) {
            /* filter returned a reference into its own buffer: take a padded copy */
            uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
                memcpy(t, new_pkt.data, new_pkt.size);
                memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

            /* drop the old payload and wrap the filtered one in a refcounted buffer */
            pkt->side_data = NULL;
            pkt->side_data_elems = 0;
            av_packet_unref(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);

            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; rebuild dts from the three candidates */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* median of {pts, dts, last_mux_dts + 1}: sum minus min minus max */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        /* enforce monotonically increasing dts for audio/video */
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);

    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),

    ret = av_interleaved_write_frame(s, pkt);
        /* mux failure is fatal for the whole output: finish every stream */
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/**
 * Mark a stream's encoder as finished; when applicable, clamp the owning
 * file's recording_time to this stream's end so other streams stop too.
 */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/**
 * Enforce -t: when the stream's next output timestamp reaches the file's
 * recording_time, close the stream and report that encoding must stop
 * (return value is falsy then; truthy while encoding may continue).
 */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/**
 * Encode one audio frame and mux the resulting packet(s): fix up the
 * frame pts against the stream's sample counter, encode via
 * avcodec_encode_audio2(), rescale timestamps to the stream time base
 * and pass the packet to write_frame().
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))

    /* without a usable pts (or with negative -async), stamp the frame from
       the running sample count */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* packet timestamps come back in encoder time base; convert for the muxer */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

        write_frame(s, &pkt, ost);
/**
 * Encode one subtitle and mux the result.  DVB subtitles are encoded
 * twice (one packet draws, a second clears); timestamps are shifted to
 * honor -ss and scaled into the stream time base.
 */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");

        /* lazily allocate the shared scratch buffer for encoded subtitles */
        subtitle_out = av_malloc(subtitle_out_max_size);
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))

        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* the encoder may have zeroed num_rects for the clearing pass */
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            /* draw pass uses start time, clear pass uses end time (90 kHz units) */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;
        write_frame(s, &pkt, ost);
/**
 * Encode one (possibly duplicated or dropped) video frame according to
 * the active -vsync mode: compute how many frames to emit for this input
 * picture (nb_frames) and how many are repeats of the previous picture
 * (nb0_frames), then encode each, handling forced keyframes, field order,
 * raw-picture passthrough and two-pass log output.
 */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* frame duration (in encoder ticks) from the filter output frame rate ... */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    /* ... clamped by the forced output frame rate ... */
    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* ... or taken from the frame's own packet duration when no filters apply */
    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

        /* flush: predict the repeat count from recent history (median of 3) */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);

    /* delta0: gap between the frame's ideal pts and the next output slot */
    delta0 = sync_ipts - ost->sync_opts;
    delta = delta0 + duration;

    /* by default, we output a single frame */

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;

        /* frame arrived early (negative delta0): absorb the gap when syncing */
        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
        double cor = FFMIN(-delta0, duration);
        if (delta0 < -0.6) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            /* NOTE(review): "Cliping" typo is in the original log string */
            av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);

    switch (format_video_sync) {
        /* VSCFR: drop initial frames instead of duplicating to reach t=0 */
        if (ost->frame_number == 0 && delta - duration >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
            ost->sync_opts = lrint(sync_ipts);
        /* CFR: drop or duplicate to keep a constant rate */
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
        /* VFR: emit at the source timestamp, dropping only on big overlap */
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);

    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    /* honor -frames and keep the repeat-count history for flush prediction */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_droped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_droped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* repeats re-encode the previous picture, the rest use the new one */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

#if FF_API_LAVF_FMT_RAWPICTURE
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            /* propagate field order to the muxing context */
            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;   /* let the encoder decide, unless forced below */

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* forced keyframes: explicit pts list, expression, or "source" */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                /* encoders without delay may omit pts; fall back to sync_opts */
                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);

    /* remember the picture for possible duplication on the next call */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
1250 static double psnr(double d)
1252 return -10.0 * log10(d);
/**
 * Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * frame size, instantaneous and average bitrate, picture type) to the
 * -vstats file, opening it on first use.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        /* instantaneous bitrate of this frame vs running average for the stream */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/**
 * Mark a stream fully finished (encoder and muxer); the loop below also
 * finishes the sibling streams of its output file -- presumably guarded
 * by an elided condition (-shortest); confirm against full source.
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1307 * Get and encode new output from any of the filtergraphs, without causing
1310 * @return 0 for success, <0 for severe errors
/*
 * reap_filters(): pull every frame currently available from the buffer
 * sinks of all output streams and hand it to the appropriate encoder
 * (do_video_out / do_audio_out).  With `flush` set, EOF from a video
 * sink triggers a final NULL-frame flush of the encoder.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): original line numbers show gaps in this excerpt — loop
 * closing braces, `continue`/`break` statements and some declarations
 * (e.g. `int i, ret`) are missing from this text.
 */
1312 static int reap_filters(int flush)
1314 AVFrame *filtered_frame = NULL;
1317 /* Reap all buffers present in the buffer sinks */
1318 for (i = 0; i < nb_output_streams; i++) {
1319 OutputStream *ost = output_streams[i];
1320 OutputFile *of = output_files[ost->file_index];
1321 AVFilterContext *filter;
1322 AVCodecContext *enc = ost->enc_ctx;
1327 filter = ost->filter->filter;
/* lazily allocate the reusable per-stream frame */
1329 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1330 return AVERROR(ENOMEM);
1332 filtered_frame = ost->filtered_frame;
1335 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking fetch: NO_REQUEST means only take what is already buffered */
1336 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1337 AV_BUFFERSINK_FLAG_NO_REQUEST);
1339 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1340 av_log(NULL, AV_LOG_WARNING,
1341 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1342 } else if (flush && ret == AVERROR_EOF) {
/* on EOF during flush, push a NULL frame to drain the video encoder */
1343 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1344 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1348 if (ost->finished) {
1349 av_frame_unref(filtered_frame);
1352 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1353 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1354 AVRational tb = enc->time_base;
/* widen the time base denominator so the rescale keeps extra fractional
 * precision, which is then divided back out into float_pts */
1355 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1357 tb.den <<= extra_bits;
1359 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1360 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1361 float_pts /= 1 << extra_bits;
1362 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1363 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1365 filtered_frame->pts =
1366 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1367 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1369 //if (ost->source_index >= 0)
1370 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame by media type */
1372 switch (filter->inputs[0]->type) {
1373 case AVMEDIA_TYPE_VIDEO:
1374 if (!ost->frame_aspect_ratio.num)
1375 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1378 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1379 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1381 enc->time_base.num, enc->time_base.den);
1384 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1386 case AVMEDIA_TYPE_AUDIO:
/* refuse un-normalized channel counts unless the encoder can adapt */
1387 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1388 enc->channels != av_frame_get_channels(filtered_frame)) {
1389 av_log(NULL, AV_LOG_ERROR,
1390 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1393 do_audio_out(of->ctx, ost, filtered_frame);
1396 // TODO support subtitle filters
1400 av_frame_unref(filtered_frame);
/*
 * print_final_stats(): log the end-of-run summary — per-media-type byte
 * totals, muxing overhead percentage, and (at verbose level) per-stream
 * demux/decode and encode/mux counters for every input and output file.
 * NOTE(review): original line numbers show gaps in this excerpt; some
 * closing braces, `if` bodies and declarations (e.g. `int i, j`) are
 * missing from this text.
 */
1407 static void print_final_stats(int64_t total_size)
1409 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1410 uint64_t subtitle_size = 0;
1411 uint64_t data_size = 0;
/* percent stays -1.0 when overhead cannot be computed ("unknown") */
1412 float percent = -1.0;
/* accumulate encoded byte counts per media type */
1416 for (i = 0; i < nb_output_streams; i++) {
1417 OutputStream *ost = output_streams[i];
1418 switch (ost->enc_ctx->codec_type) {
1419 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1420 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1421 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1422 default: other_size += ost->data_size; break;
1424 extra_size += ost->enc_ctx->extradata_size;
1425 data_size += ost->data_size;
/* NOTE(review): mixes the new AV_CODEC_FLAG_PASS1 with deprecated
 * CODEC_FLAG_PASS2 — presumably intentional compatibility; verify
 * against the header these flags come from */
1426 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1427 != AV_CODEC_FLAG_PASS1)
1431 if (data_size && total_size>0 && total_size >= data_size)
1432 percent = 100.0 * (total_size - data_size) / data_size;
1434 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1435 video_size / 1024.0,
1436 audio_size / 1024.0,
1437 subtitle_size / 1024.0,
1438 other_size / 1024.0,
1439 extra_size / 1024.0);
1441 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1443 av_log(NULL, AV_LOG_INFO, "unknown");
1444 av_log(NULL, AV_LOG_INFO, "\n");
1446 /* print verbose per-stream stats */
1447 for (i = 0; i < nb_input_files; i++) {
1448 InputFile *f = input_files[i];
/* shadows the parameter intentionally: per-file totals */
1449 uint64_t total_packets = 0, total_size = 0;
1451 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1452 i, f->ctx->filename);
1454 for (j = 0; j < f->nb_streams; j++) {
1455 InputStream *ist = input_streams[f->ist_index + j];
1456 enum AVMediaType type = ist->dec_ctx->codec_type;
1458 total_size += ist->data_size;
1459 total_packets += ist->nb_packets;
1461 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1462 i, j, media_type_string(type));
1463 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1464 ist->nb_packets, ist->data_size);
1466 if (ist->decoding_needed) {
1467 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1468 ist->frames_decoded);
1469 if (type == AVMEDIA_TYPE_AUDIO)
1470 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1471 av_log(NULL, AV_LOG_VERBOSE, "; ");
1474 av_log(NULL, AV_LOG_VERBOSE, "\n");
1477 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1478 total_packets, total_size);
/* mirror of the input loop, for output files */
1481 for (i = 0; i < nb_output_files; i++) {
1482 OutputFile *of = output_files[i];
1483 uint64_t total_packets = 0, total_size = 0;
1485 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1486 i, of->ctx->filename);
1488 for (j = 0; j < of->ctx->nb_streams; j++) {
1489 OutputStream *ost = output_streams[of->ost_index + j];
1490 enum AVMediaType type = ost->enc_ctx->codec_type;
1492 total_size += ost->data_size;
1493 total_packets += ost->packets_written;
1495 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1496 i, j, media_type_string(type));
1497 if (ost->encoding_needed) {
1498 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1499 ost->frames_encoded);
1500 if (type == AVMEDIA_TYPE_AUDIO)
1501 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1502 av_log(NULL, AV_LOG_VERBOSE, "; ");
1505 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1506 ost->packets_written, ost->data_size);
1508 av_log(NULL, AV_LOG_VERBOSE, "\n");
1511 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1512 total_packets, total_size);
/* warn loudly when nothing at all was encoded */
1514 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1515 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1517 av_log(NULL, AV_LOG_WARNING, "\n");
1519 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * print_report(): emit the periodic status line (frame/fps/q/size/time/
 * bitrate/dup/drop) to stderr or the log, and mirror the same values in
 * key=value form into the -progress AVIO stream.  Rate-limited to one
 * update per 500ms unless this is the final report.
 * NOTE(review): original line numbers show gaps in this excerpt; several
 * declarations (buf, total_size, ost, qp, j, etc.), `return`s and closing
 * braces are missing from this text.
 */
1524 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1527 AVBPrint buf_script;
1529 AVFormatContext *oc;
1531 AVCodecContext *enc;
1532 int frame_number, vid, i;
1534 int64_t pts = INT64_MIN + 1;
/* static state: remembers the previous report time across calls */
1535 static int64_t last_time = -1;
1536 static int qp_histogram[52];
1537 int hours, mins, secs, us;
1539 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: only report every 500ms, except for the final report */
1542 if (!is_last_report) {
1543 if (last_time == -1) {
1544 last_time = cur_time;
1547 if ((cur_time - last_time) < 500000)
1549 last_time = cur_time;
1553 oc = output_files[0]->ctx;
1555 total_size = avio_size(oc->pb);
1556 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1557 total_size = avio_tell(oc->pb);
1561 av_bprint_init(&buf_script, 0, 1);
1562 for (i = 0; i < nb_output_streams; i++) {
1564 ost = output_streams[i];
1566 if (!ost->stream_copy)
1567 q = ost->quality / (float) FF_QP2LAMBDA;
/* once a video stream was seen (vid set), later video streams only add q */
1569 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1570 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1571 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1572 ost->file_index, ost->index, q);
1574 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575 float fps, t = (cur_time-timer_start) / 1000000.0;
1577 frame_number = ost->frame_number;
1578 fps = t > 1 ? frame_number / t : 0;
/* "%3.*f" with precision (fps < 9.95): one decimal only for small fps */
1579 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1580 frame_number, fps < 9.95, fps, q);
1581 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1582 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1583 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1584 ost->file_index, ost->index, q);
1586 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram: log2-compressed per-bucket counts printed as hex digits */
1590 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1592 for (j = 0; j < 32; j++)
1593 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1596 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1598 double error, error_sum = 0;
1599 double scale, scale_sum = 0;
1601 char type[3] = { 'Y','U','V' };
1602 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1603 for (j = 0; j < 3; j++) {
/* last report: cumulative encoder error; otherwise this frame's error */
1604 if (is_last_report) {
1605 error = enc->error[j];
1606 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1608 error = ost->error[j];
1609 scale = enc->width * enc->height * 255.0 * 255.0;
1615 p = psnr(error / scale);
1616 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script key */
1617 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1618 ost->file_index, ost->index, type[j] | 32, p);
1620 p = psnr(error_sum / scale_sum);
1621 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1622 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1623 ost->file_index, ost->index, p);
1627 /* compute min output value */
1628 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1629 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1630 ost->st->time_base, AV_TIME_BASE_Q));
1632 nb_frames_drop += ost->last_droped;
1635 secs = FFABS(pts) / AV_TIME_BASE;
1636 us = FFABS(pts) % AV_TIME_BASE;
1642 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1644 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1646 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1647 "size=%8.0fkB time=", total_size / 1024.0);
1649 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1650 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1651 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1652 (100 * us) / AV_TIME_BASE);
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1656 av_bprintf(&buf_script, "bitrate=N/A\n");
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1659 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1662 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1663 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1664 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1665 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1666 hours, mins, secs, us);
1668 if (nb_frames_dup || nb_frames_drop)
1669 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1670 nb_frames_dup, nb_frames_drop);
1671 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1672 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
/* '\r' keeps periodic reports on one line; '\n' terminates the last one */
1674 if (print_stats || is_last_report) {
1675 const char end = is_last_report ? '\n' : '\r';
1676 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1677 fprintf(stderr, "%s %c", buf, end);
1679 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1684 if (progress_avio) {
1685 av_bprintf(&buf_script, "progress=%s\n",
1686 is_last_report ? "end" : "continue");
1687 avio_write(progress_avio, buf_script.str,
1688 FFMIN(buf_script.len, buf_script.size - 1));
1689 avio_flush(progress_avio);
1690 av_bprint_finalize(&buf_script, NULL);
1691 if (is_last_report) {
1692 avio_closep(&progress_avio);
1697 print_final_stats(total_size);
/*
 * flush_encoders(): at end of input, repeatedly feed each still-active
 * encoder a NULL frame to drain its delayed packets, writing every
 * produced packet to the muxer until the encoder signals it is empty.
 * NOTE(review): original line numbers show gaps in this excerpt; the
 * drain loop condition, `continue` statements, pkt declaration, error
 * `exit_program` paths and closing braces are missing from this text.
 */
1700 static void flush_encoders(void)
1704 for (i = 0; i < nb_output_streams; i++) {
1705 OutputStream *ost = output_streams[i];
1706 AVCodecContext *enc = ost->enc_ctx;
1707 AVFormatContext *os = output_files[ost->file_index]->ctx;
1708 int stop_encoding = 0;
1710 if (!ost->encoding_needed)
/* raw/PCM-style audio (frame_size <= 1) has nothing buffered to flush */
1713 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1715 #if FF_API_LAVF_FMT_RAWPICTURE
1716 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* select the media-specific encode entry point */
1721 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1724 switch (enc->codec_type) {
1725 case AVMEDIA_TYPE_AUDIO:
1726 encode = avcodec_encode_audio2;
1729 case AVMEDIA_TYPE_VIDEO:
1730 encode = avcodec_encode_video2;
1741 av_init_packet(&pkt);
1745 update_benchmark(NULL);
/* NULL frame = drain request */
1746 ret = encode(enc, &pkt, NULL, &got_packet);
1747 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1749 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1754 if (ost->logfile && enc->stats_out) {
1755 fprintf(ost->logfile, "%s", enc->stats_out);
1761 if (ost->finished & MUXER_FINISHED) {
1762 av_packet_unref(&pkt);
1765 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1766 pkt_size = pkt.size;
1767 write_frame(os, &pkt, ost);
1768 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1769 do_video_stats(ost, pkt_size);
1780 * Check whether a packet from ist should be written into ost at this time
/*
 * check_output_constraints(): decide whether a packet from input stream
 * `ist` may be written into output stream `ost` right now — the output
 * must be mapped to this exact input, and the packet must not precede
 * the output file's start time.
 * NOTE(review): the `return` statements for both checks and the final
 * success return are missing from this excerpt (original line numbers
 * show gaps).
 */
1782 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1784 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files */
1785 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1787 if (ost->source_index != ist_index)
1793 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * do_streamcopy(): forward a demuxed packet to the output without
 * re-encoding — rescale its timestamps into the output stream time base,
 * honor -ss/-t style start/recording-time limits, optionally run the
 * bitstream through av_parser_change(), and hand it to write_frame().
 * NOTE(review): original line numbers show gaps in this excerpt; the
 * AVPacket/AVPicture declarations, several `return`s, opkt.pts fixups
 * and closing braces are missing from this text.
 */
1799 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1801 OutputFile *of = output_files[ost->file_index];
1802 InputFile *f = input_files [ist->file_index];
1803 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
/* output-file start time expressed in both stream time bases */
1804 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1805 int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1809 av_init_packet(&opkt);
/* skip initial non-keyframes unless explicitly requested to copy them */
1811 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1812 !ost->copy_initial_nonkeyframes)
1815 if (pkt->pts == AV_NOPTS_VALUE) {
1816 if (!ost->frame_number && ist->pts < start_time &&
1817 !ost->copy_prior_start)
1820 if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1821 !ost->copy_prior_start)
/* stop this stream once the output recording time is exceeded */
1825 if (of->recording_time != INT64_MAX &&
1826 ist->pts >= of->recording_time + start_time) {
1827 close_output_stream(ost);
1831 if (f->recording_time != INT64_MAX) {
1832 start_time = f->ctx->start_time;
1833 if (f->start_time != AV_NOPTS_VALUE)
1834 start_time += f->start_time;
1835 if (ist->pts >= f->recording_time + start_time) {
1836 close_output_stream(ost);
1841 /* force the input stream PTS */
1842 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1845 if (pkt->pts != AV_NOPTS_VALUE)
1846 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1848 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet has none */
1850 if (pkt->dts == AV_NOPTS_VALUE)
1851 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1853 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1854 opkt.dts -= ost_tb_start_time;
/* audio: re-derive dts/pts from sample counts to avoid rounding drift */
1856 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1857 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1859 duration = ist->dec_ctx->frame_size;
1860 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1861 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1862 ost->st->time_base) - ost_tb_start_time;
1865 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1866 opkt.flags = pkt->flags;
1867 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1868 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1869 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1870 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1871 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1873 int ret = av_parser_change(ost->parser, ost->st->codec,
1874 &opkt.data, &opkt.size,
1875 pkt->data, pkt->size,
1876 pkt->flags & AV_PKT_FLAG_KEY);
1878 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data so the packet owns (and frees) it */
1883 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1888 opkt.data = pkt->data;
1889 opkt.size = pkt->size;
1891 av_copy_packet_side_data(&opkt, pkt);
1893 #if FF_API_LAVF_FMT_RAWPICTURE
1894 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1895 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1896 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1897 /* store AVPicture in AVPacket, as expected by the output format */
1898 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1900 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1904 opkt.data = (uint8_t *)&pict;
1905 opkt.size = sizeof(AVPicture);
1906 opkt.flags |= AV_PKT_FLAG_KEY;
1910 write_frame(of->ctx, &opkt, ost);
/*
 * guess_input_channel_layout(): if the decoder reported no channel
 * layout, derive a default one from the channel count (bounded by
 * ist->guess_layout_max) and warn the user about the guess.
 * NOTE(review): the `return 0` / `return 1` statements are missing from
 * this excerpt (original line numbers show gaps); presumably it returns
 * nonzero on success — verify against the complete source.
 */
1913 int guess_input_channel_layout(InputStream *ist)
1915 AVCodecContext *dec = ist->dec_ctx;
1917 if (!dec->channel_layout) {
1918 char layout_name[256];
/* refuse to guess beyond the user-configured channel-count limit */
1920 if (dec->channels > ist->guess_layout_max)
1922 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1923 if (!dec->channel_layout)
1925 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1926 dec->channels, dec->channel_layout);
1927 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1928 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * check_decode_result(): bookkeeping after a decode call — count
 * successes vs. failures in decode_error_stat[], and with -xerror abort
 * on decode errors or on corrupt decoded frames.
 * `ist` may be NULL (subtitle path passes NULL); the corrupt-frame check
 * is skipped then.
 */
1933 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] = successes, [1] = failures */
1935 if (*got_output || ret<0)
1936 decode_error_stat[ret<0] ++;
1938 if (ret < 0 && exit_on_error)
1941 if (exit_on_error && *got_output && ist) {
1942 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1943 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * decode_audio(): decode one audio packet, advance the stream's pts/dts
 * tracking, reconfigure filtergraphs if the sample format / rate /
 * channel layout changed mid-stream, and push the decoded frame into
 * every attached filter input.
 * Returns the decoder's return value, or a negative error from the
 * filter push path.
 * NOTE(review): original line numbers show gaps in this excerpt; some
 * error-path statements, `exit_program` calls and closing braces are
 * missing from this text.
 */
1949 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1951 AVFrame *decoded_frame, *f;
1952 AVCodecContext *avctx = ist->dec_ctx;
1953 int i, ret, err = 0, resample_changed;
1954 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode / filter frames */
1956 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1957 return AVERROR(ENOMEM);
1958 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1959 return AVERROR(ENOMEM);
1960 decoded_frame = ist->decoded_frame;
1962 update_benchmark(NULL);
1963 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1964 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1966 if (ret >= 0 && avctx->sample_rate <= 0) {
1967 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1968 ret = AVERROR_INVALIDDATA;
1971 check_decode_result(ist, got_output, ret);
1973 if (!*got_output || ret < 0)
1976 ist->samples_decoded += decoded_frame->nb_samples;
1977 ist->frames_decoded++;
1980 /* increment next_dts to use for the case where the input stream does not
1981 have timestamps or there are multiple frames in the packet */
1982 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1984 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream parameter changes that require filter reconfiguration */
1988 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1989 ist->resample_channels != avctx->channels ||
1990 ist->resample_channel_layout != decoded_frame->channel_layout ||
1991 ist->resample_sample_rate != decoded_frame->sample_rate;
1992 if (resample_changed) {
1993 char layout1[64], layout2[64];
1995 if (!guess_input_channel_layout(ist)) {
1996 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1997 "layout for Input Stream #%d.%d\n", ist->file_index,
2001 decoded_frame->channel_layout = avctx->channel_layout;
2003 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2004 ist->resample_channel_layout);
2005 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2006 decoded_frame->channel_layout);
2008 av_log(NULL, AV_LOG_INFO,
2009 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2010 ist->file_index, ist->st->index,
2011 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2012 ist->resample_channels, layout1,
2013 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2014 avctx->channels, layout2);
/* remember the new parameters for the next change detection */
2016 ist->resample_sample_fmt = decoded_frame->format;
2017 ist->resample_sample_rate = decoded_frame->sample_rate;
2018 ist->resample_channel_layout = decoded_frame->channel_layout;
2019 ist->resample_channels = avctx->channels;
2021 for (i = 0; i < nb_filtergraphs; i++)
2022 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2023 FilterGraph *fg = filtergraphs[i];
2024 if (configure_filtergraph(fg) < 0) {
2025 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2031 /* if the decoder provides a pts, use it instead of the last packet pts.
2032 the decoder could be delaying output by a packet or more. */
2033 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2034 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2035 decoded_frame_tb = avctx->time_base;
2036 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2037 decoded_frame->pts = decoded_frame->pkt_pts;
2038 decoded_frame_tb = ist->st->time_base;
2039 } else if (pkt->pts != AV_NOPTS_VALUE) {
2040 decoded_frame->pts = pkt->pts;
2041 decoded_frame_tb = ist->st->time_base;
2043 decoded_frame->pts = ist->dts;
2044 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so multi-frame packets don't reuse it */
2046 pkt->pts = AV_NOPTS_VALUE;
2047 if (decoded_frame->pts != AV_NOPTS_VALUE)
2048 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2049 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2050 (AVRational){1, avctx->sample_rate});
2051 ist->nb_samples = decoded_frame->nb_samples;
/* push into every filter; all but the last get a ref, the last gets the frame */
2052 for (i = 0; i < ist->nb_filters; i++) {
2053 if (i < ist->nb_filters - 1) {
2054 f = ist->filter_frame;
2055 err = av_frame_ref(f, decoded_frame);
2060 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2061 AV_BUFFERSRC_FLAG_PUSH);
2062 if (err == AVERROR_EOF)
2063 err = 0; /* ignore */
2067 decoded_frame->pts = AV_NOPTS_VALUE;
2069 av_frame_unref(ist->filter_frame);
2070 av_frame_unref(decoded_frame);
2071 return err < 0 ? err : ret;
/*
 * decode_video(): decode one video packet, reconcile has_b_frames with
 * the demuxer, derive a best-effort timestamp, retrieve hwaccel frames,
 * reconfigure filtergraphs on size/format changes, and push the decoded
 * frame into every attached filter input.
 * Returns the last filter-push status, or a negative error.
 * NOTE(review): original line numbers show gaps in this excerpt; some
 * error paths, `goto fail` targets and closing braces are missing from
 * this text.
 */
2074 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2076 AVFrame *decoded_frame, *f;
2077 int i, ret = 0, err = 0, resample_changed;
2078 int64_t best_effort_timestamp;
2079 AVRational *frame_sample_aspect;
2081 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2082 return AVERROR(ENOMEM);
2083 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2084 return AVERROR(ENOMEM);
2085 decoded_frame = ist->decoded_frame;
/* feed the tracked dts to the decoder via the packet */
2086 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2088 update_benchmark(NULL);
2089 ret = avcodec_decode_video2(ist->dec_ctx,
2090 decoded_frame, got_output, pkt);
2091 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2093 // The following line may be required in some cases where there is no parser
2094 // or the parser does not has_b_frames correctly
2095 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2096 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2097 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2099 av_log(ist->dec_ctx, AV_LOG_WARNING,
2100 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2101 "If you want to help, upload a sample "
2102 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2103 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2104 ist->dec_ctx->has_b_frames,
2105 ist->st->codec->has_b_frames);
2108 check_decode_result(ist, got_output, ret);
/* debug aid: report decoder-context vs. frame parameter mismatches */
2110 if (*got_output && ret >= 0) {
2111 if (ist->dec_ctx->width != decoded_frame->width ||
2112 ist->dec_ctx->height != decoded_frame->height ||
2113 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2114 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2115 decoded_frame->width,
2116 decoded_frame->height,
2117 decoded_frame->format,
2118 ist->dec_ctx->width,
2119 ist->dec_ctx->height,
2120 ist->dec_ctx->pix_fmt);
2124 if (!*got_output || ret < 0)
2127 if(ist->top_field_first>=0)
2128 decoded_frame->top_field_first = ist->top_field_first;
2130 ist->frames_decoded++;
/* download the frame from hardware memory when a hwaccel is active */
2132 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2133 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2137 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2139 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2140 if(best_effort_timestamp != AV_NOPTS_VALUE)
2141 ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2144 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2145 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2146 ist->st->index, av_ts2str(decoded_frame->pts),
2147 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2148 best_effort_timestamp,
2149 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2150 decoded_frame->key_frame, decoded_frame->pict_type,
2151 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides the decoder's */
2156 if (ist->st->sample_aspect_ratio.num)
2157 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2159 resample_changed = ist->resample_width != decoded_frame->width ||
2160 ist->resample_height != decoded_frame->height ||
2161 ist->resample_pix_fmt != decoded_frame->format;
2162 if (resample_changed) {
2163 av_log(NULL, AV_LOG_INFO,
2164 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2165 ist->file_index, ist->st->index,
2166 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2167 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2169 ist->resample_width = decoded_frame->width;
2170 ist->resample_height = decoded_frame->height;
2171 ist->resample_pix_fmt = decoded_frame->format;
2173 for (i = 0; i < nb_filtergraphs; i++) {
2174 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2175 configure_filtergraph(filtergraphs[i]) < 0) {
2176 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2182 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* push into every filter; all but the last get a ref, the last gets the frame */
2183 for (i = 0; i < ist->nb_filters; i++) {
2184 if (!frame_sample_aspect->num)
2185 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2187 if (i < ist->nb_filters - 1) {
2188 f = ist->filter_frame;
2189 err = av_frame_ref(f, decoded_frame);
2194 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2195 if (ret == AVERROR_EOF) {
2196 ret = 0; /* ignore */
2197 } else if (ret < 0) {
2198 av_log(NULL, AV_LOG_FATAL,
2199 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2205 av_frame_unref(ist->filter_frame);
2206 av_frame_unref(decoded_frame);
2207 return err < 0 ? err : ret;
/*
 * transcode_subtitles(): decode one subtitle packet, optionally fix up
 * the previous subtitle's duration (-fix_sub_duration works one event
 * behind, hence the FFSWAPs), feed sub2video, and re-encode the event
 * into every mapped subtitle output stream.
 * NOTE(review): original line numbers show gaps in this excerpt; some
 * declarations (e.g. `int end`), `return`s and closing braces are
 * missing from this text.
 */
2210 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2212 AVSubtitle subtitle;
2213 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2214 &subtitle, got_output, pkt);
/* ist deliberately NULL: skip the corrupt-frame check for subtitles */
2216 check_decode_result(NULL, got_output, ret);
2218 if (ret < 0 || !*got_output) {
2220 sub2video_flush(ist);
2224 if (ist->fix_sub_duration) {
2226 if (ist->prev_sub.got_output) {
/* clamp the previous event so it ends when this one starts */
2227 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2228 1000, AV_TIME_BASE);
2229 if (end < ist->prev_sub.subtitle.end_display_time) {
2230 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2231 "Subtitle duration reduced from %d to %d%s\n",
2232 ist->prev_sub.subtitle.end_display_time, end,
2233 end <= 0 ? ", dropping it" : "");
2234 ist->prev_sub.subtitle.end_display_time = end;
/* delay output by one event: emit the previous subtitle, hold this one */
2237 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2238 FFSWAP(int, ret, ist->prev_sub.ret);
2239 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2247 sub2video_update(ist, &subtitle);
2249 if (!subtitle.num_rects)
2252 ist->frames_decoded++;
2254 for (i = 0; i < nb_output_streams; i++) {
2255 OutputStream *ost = output_streams[i];
2257 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2258 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2261 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2265 avsubtitle_free(&subtitle);
/*
 * send_filter_eof(): push a NULL frame (EOF marker) into every filter
 * input attached to this input stream so the filtergraphs can flush.
 * NOTE(review): the declarations of `i`/`ret`, the error check on the
 * push and the `return` are missing from this excerpt (original line
 * numbers show gaps).
 */
2269 static int send_filter_eof(InputStream *ist)
2272 for (i = 0; i < ist->nb_filters; i++) {
2273 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2280 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * process_input_packet(): central per-packet dispatcher.  Maintains the
 * input stream's pts/dts tracking, runs the decode loop for streams that
 * need decoding (audio/video/subtitle), sends filter EOF on flush, and
 * for stream-copy advances timestamps and forwards the packet to every
 * eligible output via do_streamcopy().  `no_eof` suppresses the filter
 * EOF when flushing for a loop restart.
 * NOTE(review): original line numbers show gaps in this excerpt; several
 * statements (the `avpkt = *pkt` copy, loop break/continue logic, some
 * closing braces and the final return) are missing from this text.
 */
2281 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* first packet: seed dts from the average frame rate and decoder delay */
2287 if (!ist->saw_first_ts) {
2288 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2290 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2291 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2292 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2294 ist->saw_first_ts = 1;
2297 if (ist->next_dts == AV_NOPTS_VALUE)
2298 ist->next_dts = ist->dts;
2299 if (ist->next_pts == AV_NOPTS_VALUE)
2300 ist->next_pts = ist->pts;
2304 av_init_packet(&avpkt);
2312 if (pkt->dts != AV_NOPTS_VALUE) {
2313 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2314 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2315 ist->next_pts = ist->pts = ist->dts;
2318 // while we have more to decode or while the decoder did output something on EOF
2319 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2323 ist->pts = ist->next_pts;
2324 ist->dts = ist->next_dts;
2326 if (avpkt.size && avpkt.size != pkt->size &&
2327 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2328 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2329 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2330 ist->showed_multi_packet_warning = 1;
2333 switch (ist->dec_ctx->codec_type) {
2334 case AVMEDIA_TYPE_AUDIO:
2335 ret = decode_audio (ist, &avpkt, &got_output);
2337 case AVMEDIA_TYPE_VIDEO:
2338 ret = decode_video (ist, &avpkt, &got_output);
/* estimate frame duration: packet duration, else framerate and
 * parser repeat_pict (field-repeat aware) */
2339 if (avpkt.duration) {
2340 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2341 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2342 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2343 duration = ((int64_t)AV_TIME_BASE *
2344 ist->dec_ctx->framerate.den * ticks) /
2345 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2349 if(ist->dts != AV_NOPTS_VALUE && duration) {
2350 ist->next_dts += duration;
2352 ist->next_dts = AV_NOPTS_VALUE;
2355 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2357 case AVMEDIA_TYPE_SUBTITLE:
2358 ret = transcode_subtitles(ist, &avpkt, &got_output);
2365 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2366 ist->file_index, ist->st->index, av_err2str(ret));
2373 avpkt.pts= AV_NOPTS_VALUE;
2375 // touch data and size only if not EOF
2377 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2385 if (got_output && !pkt)
2389 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2390 /* except when looping we need to flush but not to send an EOF */
2391 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2392 int ret = send_filter_eof(ist);
2394 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2399 /* handle stream copy */
2400 if (!ist->decoding_needed) {
2401 ist->dts = ist->next_dts;
2402 switch (ist->dec_ctx->codec_type) {
2403 case AVMEDIA_TYPE_AUDIO:
2404 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2405 ist->dec_ctx->sample_rate;
2407 case AVMEDIA_TYPE_VIDEO:
2408 if (ist->framerate.num) {
2409 // TODO: Remove work-around for c99-to-c89 issue 7
2410 AVRational time_base_q = AV_TIME_BASE_Q;
/* snap next_dts onto the forced-framerate grid */
2411 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2412 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2413 } else if (pkt->duration) {
2414 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2415 } else if(ist->dec_ctx->framerate.num != 0) {
2416 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2417 ist->next_dts += ((int64_t)AV_TIME_BASE *
2418 ist->dec_ctx->framerate.den * ticks) /
2419 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2423 ist->pts = ist->dts;
2424 ist->next_pts = ist->next_dts;
/* forward the packet to every output stream copying from this input */
2426 for (i = 0; pkt && i < nb_output_streams; i++) {
2427 OutputStream *ost = output_streams[i];
2429 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2432 do_streamcopy(ist, ost, pkt);
/* Emit an SDP (Session Description Protocol) description covering every
 * output file whose muxer is "rtp".  The text is printed to stdout, or,
 * when an SDP file name was requested, written to that file instead. */
2438 static void print_sdp(void)
2443 AVIOContext *sdp_pb;
2444 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts the matches.
 * NOTE(review): no NULL check on avc is visible here — confirm upstream. */
2448 for (i = 0, j = 0; i < nb_output_files; i++) {
2449 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2450 avc[j] = output_files[i]->ctx;
2458 av_sdp_create(avc, j, sdp, sizeof(sdp));
2460 if (!sdp_filename) {
2461 printf("SDP:\n%s\n", sdp);
2464 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2465 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2467 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2468 avio_closep(&sdp_pb);
/* sdp_filename is freed here so the SDP file is only ever written once. */
2469 av_freep(&sdp_filename);
/* Scan the global hwaccels[] table (terminated by a NULL name entry)
 * for the hardware acceleration whose pix_fmt matches, returning a
 * pointer to that table entry. */
2477 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2480 for (i = 0; hwaccels[i].name; i++)
2481 if (hwaccels[i].pix_fmt == pix_fmt)
2482 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's list of
 * candidate pixel formats and choose the first hardware-accelerated one
 * whose hwaccel can actually be initialized for this input stream.
 * s->opaque carries the InputStream (set in init_input_stream). */
2486 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2488 InputStream *ist = s->opaque;
2489 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2492 for (p = pix_fmts; *p != -1; p++) {
2493 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2494 const HWAccel *hwaccel;
/* Software formats are skipped; only hwaccel formats are considered. */
2496 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2499 hwaccel = get_hwaccel(*p);
/* Reject a candidate that conflicts with an already-active hwaccel or
 * with the specific hwaccel requested on the command line. */
2501 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2502 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2505 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested;
 * otherwise the loop presumably continues to the next candidate. */
2507 if (ist->hwaccel_id == hwaccel->id) {
2508 av_log(NULL, AV_LOG_FATAL,
2509 "%s hwaccel requested for input stream #%d:%d, "
2510 "but cannot be initialized.\n", hwaccel->name,
2511 ist->file_index, ist->st->index);
2512 return AV_PIX_FMT_NONE;
/* Remember which hwaccel/pixel format won so get_buffer can route
 * frame allocation accordingly. */
2516 ist->active_hwaccel_id = hwaccel->id;
2517 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses its pixel format, otherwise fall
 * back to libavcodec's default allocator. */
2524 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2526 InputStream *ist = s->opaque;
2528 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2529 return ist->hwaccel_get_buffer(s, frame, flags);
2531 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for input stream ist_index (when decoding is needed)
 * and reset its timestamp bookkeeping.  On failure a human-readable
 * message is written into error/error_len and a negative AVERROR code
 * is returned. */
2534 static int init_input_stream(int ist_index, char *error, int error_len)
2537 InputStream *ist = input_streams[ist_index];
2539 if (ist->decoding_needed) {
2540 AVCodec *codec = ist->dec;
2542 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2543 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2544 return AVERROR(EINVAL);
/* Hook up the hwaccel-aware callbacks; opaque lets them reach the
 * InputStream from inside libavcodec. */
2547 ist->dec_ctx->opaque = ist;
2548 ist->dec_ctx->get_format = get_format;
2549 ist->dec_ctx->get_buffer2 = get_buffer;
2550 ist->dec_ctx->thread_safe_callbacks = 1;
2552 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output: enable end-display-time computation
 * (unless the user already set compute_edt explicitly). */
2553 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2554 (ist->decoding_needed & DECODING_FOR_OST)) {
2555 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2556 if (ist->decoding_needed & DECODING_FOR_FILTER)
2557 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic threading unless the user chose a thread count. */
2560 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2561 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2562 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2563 if (ret == AVERROR_EXPERIMENTAL)
2564 abort_codec_experimental(codec, 0);
2566 snprintf(error, error_len,
2567 "Error while opening decoder for input stream "
2569 ist->file_index, ist->st->index, av_err2str(ret));
/* Options left in the dict were not consumed by the decoder — report. */
2572 assert_avoptions(ist->decoder_opts);
/* Timestamp prediction state starts unknown. */
2575 ist->next_pts = AV_NOPTS_VALUE;
2576 ist->next_dts = AV_NOPTS_VALUE;
/* Map an output stream back to its source InputStream via source_index
 * (negative source_index means no direct input source). */
2581 static InputStream *get_input_stream(OutputStream *ost)
2583 if (ost->source_index >= 0)
2584 return input_streams[ost->source_index];
/* qsort comparator for int64_t values, ascending.  FFDIFFSIGN avoids
 * the overflow a plain subtraction could produce. */
2588 static int compare_int64(const void *a, const void *b)
2590 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Open the encoder for one output stream (when encoding is needed) and
 * propagate the resulting parameters — subtitle header, time base,
 * audio frame size — to the muxer's AVStream.  error/error_len receive
 * a message on failure; returns a negative AVERROR code then. */
2593 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2597 if (ost->encoding_needed) {
2598 AVCodec *codec = ost->enc;
2599 AVCodecContext *dec = NULL;
2602 if ((ist = get_input_stream(ost)))
2604 if (dec && dec->subtitle_header) {
2605 /* ASS code assumes this buffer is null terminated so add extra byte. */
2606 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2607 if (!ost->enc_ctx->subtitle_header)
2608 return AVERROR(ENOMEM);
2609 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2610 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2612 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2613 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate to 128 kb/s unless the user set b/ab. */
2614 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2616 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2617 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2618 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2620 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2621 if (ret == AVERROR_EXPERIMENTAL)
2622 abort_codec_experimental(codec, 1);
2623 snprintf(error, error_len,
2624 "Error while opening encoder for output stream #%d:%d - "
2625 "maybe incorrect parameters such as bit_rate, rate, width or height",
2626 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to hand out
 * exactly frame_size samples per frame. */
2629 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2630 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2631 av_buffersink_set_frame_size(ost->filter->filter,
2632 ost->enc_ctx->frame_size);
2633 assert_avoptions(ost->encoder_opts);
2634 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2635 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2636 " It takes bits/s as argument, not kbits/s\n");
2638 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2640 av_log(NULL, AV_LOG_FATAL,
2641 "Error initializing the output stream codec context.\n");
2645 // copy timebase while removing common factors
2646 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2647 ost->st->codec->codec= ost->enc_ctx->codec;
/* NOTE(review): this branch appears to be the non-encoding (stream
 * copy) path, applying user codec options directly — confirm. */
2649 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2651 av_log(NULL, AV_LOG_FATAL,
2652 "Error setting up codec context options.\n");
2655 // copy timebase while removing common factors
2656 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse a -force_key_frames specification kf (a comma-separated list of
 * times, where an entry beginning with "chapters" expands to the output
 * file's chapter start times plus an optional offset) into a sorted
 * array of timestamps expressed in avctx->time_base, stored on ost as
 * forced_kf_pts / forced_kf_count. */
2662 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2663 AVCodecContext *avctx)
2666 int n = 1, i, size, index = 0;
/* Count entries: one more than the number of separators. */
2669 for (p = kf; *p; p++)
2673 pts = av_malloc_array(size, sizeof(*pts));
2675 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2680 for (i = 0; i < n; i++) {
2681 char *next = strchr(p, ',');
2686 if (!memcmp(p, "chapters", 8)) {
2688 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked
 * against INT_MAX before the realloc). */
2691 if (avf->nb_chapters > INT_MAX - size ||
2692 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2694 av_log(NULL, AV_LOG_FATAL,
2695 "Could not allocate forced key frames array.\n");
/* Optional offset written directly after "chapters"; applied to
 * every chapter start after rescaling to the codec time base. */
2698 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2699 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2701 for (j = 0; j < avf->nb_chapters; j++) {
2702 AVChapter *c = avf->chapters[j];
2703 av_assert1(index < size);
2704 pts[index++] = av_rescale_q(c->start, c->time_base,
2705 avctx->time_base) + t;
/* Plain timestamp entry. */
2710 t = parse_time_or_die("force_key_frames", p, 1);
2711 av_assert1(index < size);
2712 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* The keyframe-forcing logic consumes the times in ascending order. */
2719 av_assert0(index == size);
2720 qsort(pts, size, sizeof(*pts), compare_int64);
2721 ost->forced_kf_count = size;
2722 ost->forced_kf_pts = pts;
/* Log a warning the first time a packet is seen for an input stream
 * index that has not been reported before; nb_streams_warn remembers
 * the highest stream index already covered so each new stream is
 * reported only once. */
2725 static void report_new_stream(int input_index, AVPacket *pkt)
2727 InputFile *file = input_files[input_index];
2728 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Stream index already covered by a previous warning — nothing to do. */
2730 if (pkt->stream_index < file->nb_streams_warn)
2732 av_log(file->ctx, AV_LOG_WARNING,
2733 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2734 av_get_media_type_string(st->codec->codec_type),
2735 input_index, pkt->stream_index,
2736 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2737 file->nb_streams_warn = pkt->stream_index + 1;
/* Fill the output stream's "encoder" metadata tag with the libavcodec
 * identification string plus the encoder name.  In bitexact mode the
 * version string is replaced by a bare "Lavc" so output stays
 * reproducible.  A tag already present in the metadata wins. */
2740 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2742 AVDictionaryEntry *e;
2744 uint8_t *encoder_string;
2745 int encoder_string_len;
2746 int format_flags = 0;
2747 int codec_flags = 0;
/* User-supplied encoder tag takes precedence. */
2749 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the muxer's "fflags" option value to detect bitexact... */
2752 e = av_dict_get(of->opts, "fflags", NULL, 0);
2754 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2757 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* ...and the encoder's "flags" option value likewise. */
2759 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2761 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2764 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* Room for LIBAVCODEC_IDENT, a space, the encoder name and a NUL. */
2767 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2768 encoder_string = av_mallocz(encoder_string_len);
2769 if (!encoder_string)
2772 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2773 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2775 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2776 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* AV_DICT_DONT_STRDUP_VAL hands ownership of encoder_string to the
 * dictionary, so it is not freed here. */
2777 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2778 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * One-time setup before the main transcode loop:
 *  - resolve complex-filtergraph outputs back to source input streams;
 *  - initialize frame-rate emulation start times;
 *  - compute each output stream's codec parameters (copied from the
 *    decoder for stream copy, or taken from the filtergraph/encoder
 *    otherwise);
 *  - open all encoders and decoders, discard unused programs;
 *  - write every output file header and print the stream mapping.
 * Returns 0 on success or a negative AVERROR code.
 */
2781 static int transcode_init(void)
2783 int ret = 0, i, j, k;
2784 AVFormatContext *oc;
2787 char error[1024] = {0};
/* Give outputs of single-input complex filtergraphs a source_index by
 * finding the input stream feeding the graph's only input. */
2790 for (i = 0; i < nb_filtergraphs; i++) {
2791 FilterGraph *fg = filtergraphs[i];
2792 for (j = 0; j < fg->nb_outputs; j++) {
2793 OutputFilter *ofilter = fg->outputs[j];
2794 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2796 if (fg->nb_inputs != 1)
2798 for (k = nb_input_streams-1; k >= 0 ; k--)
2799 if (fg->inputs[0]->ist == input_streams[k])
2801 ofilter->ost->source_index = k;
2805 /* init framerate emulation */
2806 for (i = 0; i < nb_input_files; i++) {
2807 InputFile *ifile = input_files[i];
2808 if (ifile->rate_emu)
2809 for (j = 0; j < ifile->nb_streams; j++)
2810 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2813 /* for each output stream, we compute the right encoding parameters */
2814 for (i = 0; i < nb_output_streams; i++) {
2815 AVCodecContext *enc_ctx;
2816 AVCodecContext *dec_ctx = NULL;
2817 ost = output_streams[i];
2818 oc = output_files[ost->file_index]->ctx;
2819 ist = get_input_stream(ost);
2821 if (ost->attachment_filename)
/* Stream copy writes parameters straight into the muxer's codec
 * context; encoding uses the stream's own encoder context. */
2824 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2827 dec_ctx = ist->dec_ctx;
2829 ost->st->disposition = ist->st->disposition;
2830 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2831 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If no other stream of the same media type exists in this output
 * file, mark this audio/video stream as the default one. */
2833 for (j=0; j<oc->nb_streams; j++) {
2834 AVStream *st = oc->streams[j];
2835 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2838 if (j == oc->nb_streams)
2839 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2840 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2843 if (ost->stream_copy) {
2845 uint64_t extra_size;
2847 av_assert0(ist && !ost->filter);
/* extradata copy size is padded; computed in 64 bits and bounded
 * to INT_MAX to avoid overflow. */
2849 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2851 if (extra_size > INT_MAX) {
2852 return AVERROR(EINVAL);
2855 /* if stream_copy is selected, no need to decode or encode */
2856 enc_ctx->codec_id = dec_ctx->codec_id;
2857 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container either has
 * no tag table or cannot supply a tag of its own for this codec. */
2859 if (!enc_ctx->codec_tag) {
2860 unsigned int codec_tag;
2861 if (!oc->oformat->codec_tag ||
2862 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2863 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2864 enc_ctx->codec_tag = dec_ctx->codec_tag;
2867 enc_ctx->bit_rate = dec_ctx->bit_rate;
2868 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2869 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2870 enc_ctx->field_order = dec_ctx->field_order;
2871 if (dec_ctx->extradata_size) {
2872 enc_ctx->extradata = av_mallocz(extra_size);
2873 if (!enc_ctx->extradata) {
2874 return AVERROR(ENOMEM);
2876 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2878 enc_ctx->extradata_size= dec_ctx->extradata_size;
2879 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
/* Automatic time-base heuristics (copy_tb < 0) follow: they adjust
 * the copied time base for containers that dislike very fine time
 * bases relative to the real frame rate. */
2881 enc_ctx->time_base = ist->st->time_base;
2883 * Avi is a special case here because it supports variable fps but
2884 * having the fps and timebase differe significantly adds quite some
2887 if(!strcmp(oc->oformat->name, "avi")) {
2888 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2889 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2890 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2891 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2893 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2894 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2895 enc_ctx->ticks_per_frame = 2;
2896 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2897 && av_q2d(ist->st->time_base) < 1.0/500
2899 enc_ctx->time_base = dec_ctx->time_base;
2900 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2901 enc_ctx->time_base.den *= 2;
2902 enc_ctx->ticks_per_frame = 2;
2904 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2905 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2906 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2907 && strcmp(oc->oformat->name, "f4v")
2909 if( copy_tb<0 && dec_ctx->time_base.den
2910 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2911 && av_q2d(ist->st->time_base) < 1.0/500
2913 enc_ctx->time_base = dec_ctx->time_base;
2914 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* tmcd (timecode) tracks: keep the decoder time base when it
 * represents a plausible frame duration (faster than 1/121 s). */
2917 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2918 && dec_ctx->time_base.num < dec_ctx->time_base.den
2919 && dec_ctx->time_base.num > 0
2920 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2921 enc_ctx->time_base = dec_ctx->time_base;
/* A user-forced input frame rate overrides the heuristics above. */
2924 if (!ost->frame_rate.num)
2925 ost->frame_rate = ist->framerate;
2926 if(ost->frame_rate.num)
2927 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2929 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2930 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream-level side data, skipping the display matrix when the
 * rotation was overridden on the command line. */
2932 if (ist->st->nb_side_data) {
2933 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2934 sizeof(*ist->st->side_data));
2935 if (!ost->st->side_data)
2936 return AVERROR(ENOMEM);
2938 ost->st->nb_side_data = 0;
2939 for (j = 0; j < ist->st->nb_side_data; j++) {
2940 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2941 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2943 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2946 sd_dst->data = av_malloc(sd_src->size);
2948 return AVERROR(ENOMEM);
2949 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2950 sd_dst->size = sd_src->size;
2951 sd_dst->type = sd_src->type;
2952 ost->st->nb_side_data++;
2956 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter copies for the stream-copy path. */
2958 switch (enc_ctx->codec_type) {
2959 case AVMEDIA_TYPE_AUDIO:
2960 if (audio_volume != 256) {
2961 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2964 enc_ctx->channel_layout = dec_ctx->channel_layout;
2965 enc_ctx->sample_rate = dec_ctx->sample_rate;
2966 enc_ctx->channels = dec_ctx->channels;
2967 enc_ctx->frame_size = dec_ctx->frame_size;
2968 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2969 enc_ctx->block_align = dec_ctx->block_align;
2970 enc_ctx->initial_padding = dec_ctx->delay;
2971 #if FF_API_AUDIOENC_DELAY
2972 enc_ctx->delay = dec_ctx->delay;
/* Drop bogus block_align values for MP3/AC-3 streams. */
2974 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2975 enc_ctx->block_align= 0;
2976 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2977 enc_ctx->block_align= 0;
2979 case AVMEDIA_TYPE_VIDEO:
2980 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2981 enc_ctx->width = dec_ctx->width;
2982 enc_ctx->height = dec_ctx->height;
2983 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2984 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2986 av_mul_q(ost->frame_aspect_ratio,
2987 (AVRational){ enc_ctx->height, enc_ctx->width });
2988 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2989 "with stream copy may produce invalid files\n");
2991 else if (ist->st->sample_aspect_ratio.num)
2992 sar = ist->st->sample_aspect_ratio;
2994 sar = dec_ctx->sample_aspect_ratio;
2995 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2996 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2997 ost->st->r_frame_rate = ist->st->r_frame_rate;
2999 case AVMEDIA_TYPE_SUBTITLE:
3000 enc_ctx->width = dec_ctx->width;
3001 enc_ctx->height = dec_ctx->height;
3003 case AVMEDIA_TYPE_UNKNOWN:
3004 case AVMEDIA_TYPE_DATA:
3005 case AVMEDIA_TYPE_ATTACHMENT:
/* Encoding path: pick the encoder (if not forced) and build a simple
 * filtergraph between the source stream and the encoder when needed. */
3012 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3014 /* should only happen when a default codec is not present. */
3015 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3016 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3017 ret = AVERROR(EINVAL);
3021 set_encoder_id(output_files[ost->file_index], ost);
3024 if (qsv_transcode_init(ost))
3029 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3030 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3032 fg = init_simple_filtergraph(ist, ost);
3033 if (configure_filtergraph(fg)) {
3034 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Determine the output frame rate: user setting, then filter sink,
 * then input hints, finally a 25 fps fallback with a warning. */
3039 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3040 if (!ost->frame_rate.num)
3041 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3042 if (ist && !ost->frame_rate.num)
3043 ost->frame_rate = ist->framerate;
3044 if (ist && !ost->frame_rate.num)
3045 ost->frame_rate = ist->st->r_frame_rate;
3046 if (ist && !ost->frame_rate.num) {
3047 ost->frame_rate = (AVRational){25, 1};
3048 av_log(NULL, AV_LOG_WARNING,
3050 "about the input framerate is available. Falling "
3051 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3052 "if you want a different framerate.\n",
3053 ost->file_index, ost->index);
3055 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest encoder-supported frame rate unless -force_fps. */
3056 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3057 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3058 ost->frame_rate = ost->enc->supported_framerates[idx];
3060 // reduce frame rate for mpeg4 to be within the spec limits
3061 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3062 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3063 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Take the final encoder parameters from the buffersink's input link. */
3067 switch (enc_ctx->codec_type) {
3068 case AVMEDIA_TYPE_AUDIO:
3069 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3070 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3071 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3072 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3073 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3075 case AVMEDIA_TYPE_VIDEO:
3076 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3077 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3078 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3079 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3080 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3081 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3082 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE_Q; rescale them
 * to the encoder time base now that it is known. */
3084 for (j = 0; j < ost->forced_kf_count; j++)
3085 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3087 enc_ctx->time_base);
3089 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3090 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3091 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3092 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3093 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3094 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3095 if (!strncmp(ost->enc->name, "libx264", 7) &&
3096 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3097 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3098 av_log(NULL, AV_LOG_WARNING,
3099 "No pixel format specified, %s for H.264 encoding chosen.\n"
3100 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3101 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3102 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3103 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3104 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3105 av_log(NULL, AV_LOG_WARNING,
3106 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3107 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3108 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3109 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3111 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep the raw-sample depth only if geometry/format are unchanged. */
3114 enc_ctx->width != dec_ctx->width ||
3115 enc_ctx->height != dec_ctx->height ||
3116 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3117 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (handled elsewhere), or a static list of times. */
3120 if (ost->forced_keyframes) {
3121 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3122 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3123 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3125 av_log(NULL, AV_LOG_ERROR,
3126 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3129 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3130 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3132 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3134 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3135 // parse it only for static kf timings
3136 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3137 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3141 case AVMEDIA_TYPE_SUBTITLE:
3142 enc_ctx->time_base = (AVRational){1, 1000};
3143 if (!enc_ctx->width) {
3144 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3145 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3148 case AVMEDIA_TYPE_DATA:
/* Parse the -disposition value with a private flags option table. */
3156 if (ost->disposition) {
3157 static const AVOption opts[] = {
3158 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3159 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3160 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3161 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3162 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3163 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3164 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3165 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3166 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3167 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3168 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3169 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3170 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3171 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3174 static const AVClass class = {
3176 .item_name = av_default_item_name,
3178 .version = LIBAVUTIL_VERSION_INT,
3180 const AVClass *pclass = &class;
3182 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3188 /* open each encoder */
3189 for (i = 0; i < nb_output_streams; i++) {
3190 ret = init_output_stream(output_streams[i], error, sizeof(error));
3195 /* init input streams */
3196 for (i = 0; i < nb_input_streams; i++)
3197 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* A failed input init unwinds the already-opened encoders. */
3198 for (i = 0; i < nb_output_streams; i++) {
3199 ost = output_streams[i];
3200 avcodec_close(ost->enc_ctx);
3205 /* discard unused programs */
3206 for (i = 0; i < nb_input_files; i++) {
3207 InputFile *ifile = input_files[i];
3208 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3209 AVProgram *p = ifile->ctx->programs[j];
3210 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is in use. */
3212 for (k = 0; k < p->nb_stream_indexes; k++)
3213 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3214 discard = AVDISCARD_DEFAULT;
3217 p->discard = discard;
3221 /* open files and write file headers */
3222 for (i = 0; i < nb_output_files; i++) {
3223 oc = output_files[i]->ctx;
3224 oc->interrupt_callback = int_cb;
3225 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3226 snprintf(error, sizeof(error),
3227 "Could not write header for output file #%d "
3228 "(incorrect codec parameters ?): %s",
3229 i, av_err2str(ret));
3230 ret = AVERROR(EINVAL);
3233 // assert_avoptions(output_files[i]->opts);
3234 if (strcmp(oc->oformat->name, "rtp")) {
3240 /* dump the file output parameters - cannot be done before in case
3242 for (i = 0; i < nb_output_files; i++) {
3243 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3246 /* dump the stream mapping */
3247 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3248 for (i = 0; i < nb_input_streams; i++) {
3249 ist = input_streams[i];
3251 for (j = 0; j < ist->nb_filters; j++) {
3252 if (ist->filters[j]->graph->graph_desc) {
3253 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3254 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3255 ist->filters[j]->name);
3256 if (nb_filtergraphs > 1)
3257 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3258 av_log(NULL, AV_LOG_INFO, "\n");
3263 for (i = 0; i < nb_output_streams; i++) {
3264 ost = output_streams[i];
3266 if (ost->attachment_filename) {
3267 /* an attached file */
3268 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3269 ost->attachment_filename, ost->file_index, ost->index);
3273 if (ost->filter && ost->filter->graph->graph_desc) {
3274 /* output from a complex graph */
3275 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3276 if (nb_filtergraphs > 1)
3277 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3279 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3280 ost->index, ost->enc ? ost->enc->name : "?");
3284 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3285 input_streams[ost->source_index]->file_index,
3286 input_streams[ost->source_index]->st->index,
3289 if (ost->sync_ist != input_streams[ost->source_index])
3290 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3291 ost->sync_ist->file_index,
3292 ost->sync_ist->st->index);
3293 if (ost->stream_copy)
3294 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Show both the codec name and the concrete coder implementation;
 * "native" is printed when they coincide. */
3296 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3297 const AVCodec *out_codec = ost->enc;
3298 const char *decoder_name = "?";
3299 const char *in_codec_name = "?";
3300 const char *encoder_name = "?";
3301 const char *out_codec_name = "?";
3302 const AVCodecDescriptor *desc;
3305 decoder_name = in_codec->name;
3306 desc = avcodec_descriptor_get(in_codec->id);
3308 in_codec_name = desc->name;
3309 if (!strcmp(decoder_name, in_codec_name))
3310 decoder_name = "native";
3314 encoder_name = out_codec->name;
3315 desc = avcodec_descriptor_get(out_codec->id);
3317 out_codec_name = desc->name;
3318 if (!strcmp(encoder_name, out_codec_name))
3319 encoder_name = "native";
3322 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3323 in_codec_name, decoder_name,
3324 out_codec_name, encoder_name);
3326 av_log(NULL, AV_LOG_INFO, "\n");
3330 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3334 if (sdp_filename || want_sdp) {
3338 transcode_init_done = 1;
3343 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3344 static int need_output(void)
3348 for (i = 0; i < nb_output_streams; i++) {
3349 OutputStream *ost = output_streams[i];
3350 OutputFile *of = output_files[ost->file_index];
3351 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* A stream is done when it is flagged finished or its output file
 * already reached the size limit (limit_filesize). */
3353 if (ost->finished ||
3354 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Hitting the per-stream frame cap closes every stream of the file. */
3356 if (ost->frame_number >= ost->max_frames) {
3358 for (j = 0; j < of->ctx->nb_streams; j++)
3359 close_output_stream(output_streams[of->ost_index + j]);
3370 * Select the output stream to process.
3372 * @return selected output stream, or NULL if none available
3374 static OutputStream *choose_output(void)
3377 int64_t opts_min = INT64_MAX;
3378 OutputStream *ost_min = NULL;
/* Pick the unfinished stream whose current DTS (rescaled out of the
 * stream time base) is smallest, keeping muxing roughly interleaved. */
3380 for (i = 0; i < nb_output_streams; i++) {
3381 OutputStream *ost = output_streams[i];
3382 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3384 if (!ost->finished && opts < opts_min) {
/* NOTE(review): when the lowest-DTS stream is marked unavailable the
 * function yields NULL instead of skipping it — presumably to wait
 * for that stream to become ready; confirm against the caller. */
3386 ost_min = ost->unavailable ? NULL : ost;
/* Handle interactive single-key commands from stdin (q, +, -, s, h, c/C, d/D, ?).
 * Polled at most once per 100ms and never in daemon mode.
 * Returns AVERROR_EXIT when the user asked to quit or a signal was received,
 * 0 otherwise. */
3392 static int check_keyboard_interaction(int64_t cur_time)
3395 static int64_t last_time;
3396 if (received_nb_signals)
3397 return AVERROR_EXIT;
3398 /* read_key() returns 0 on EOF */
3399 if(cur_time - last_time >= 100000 && !run_as_daemon){
3401 last_time = cur_time;
3405 return AVERROR_EXIT;
/* Verbosity and statistics toggles. */
3406 if (key == '+') av_log_set_level(av_log_get_level()+10);
3407 if (key == '-') av_log_set_level(av_log_get_level()-10);
3408 if (key == 's') qp_hist ^= 1;
3411 do_hex_dump = do_pkt_dump = 0;
3412 } else if(do_pkt_dump){
3416 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a line of the form
 * "<target>|all <time>|-1 <command>[ <argument>]" from stdin and send it to
 * the first matching filter ('c') or queue it to all matching filters ('C'). */
3418 if (key == 'c' || key == 'C'){
3419 char buf[4096], target[64], command[256], arg[256] = {0};
3422 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3424 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3429 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3430 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3431 target, time, command, arg);
3432 for (i = 0; i < nb_filtergraphs; i++) {
3433 FilterGraph *fg = filtergraphs[i];
3436 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3437 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3438 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3439 } else if (key == 'c') {
3440 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3441 ret = AVERROR_PATCHWELCOME;
/* A non-negative time means "queue the command to run at that timestamp". */
3443 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3445 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3450 av_log(NULL, AV_LOG_ERROR,
3451 "Parse error, at least 3 arguments were expected, "
3452 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles the codec debug flags by shifting; 'd' reads a value from stdin. */
3455 if (key == 'd' || key == 'D'){
3458 debug = input_streams[0]->st->codec->debug<<1;
3459 if(!debug) debug = 1;
3460 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3466 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3470 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3471 fprintf(stderr,"error parsing debug value\n");
/* Propagate the new debug flags to every decoder and encoder context. */
3473 for(i=0;i<nb_input_streams;i++) {
3474 input_streams[i]->st->codec->debug = debug;
3476 for(i=0;i<nb_output_streams;i++) {
3477 OutputStream *ost = output_streams[i];
3478 ost->enc_ctx->debug = debug;
3480 if(debug) av_log_set_level(AV_LOG_DEBUG);
3481 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys): print the interactive help text. */
3484 fprintf(stderr, "key function\n"
3485 "? show this help\n"
3486 "+ increase verbosity\n"
3487 "- decrease verbosity\n"
3488 "c Send command to first matching filter supporting it\n"
3489 "C Send/Que command to all matching filters\n"
3490 "D cycle through available debug modes\n"
3491 "h dump packets/hex press to cycle through the 3 states\n"
3493 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and hands
 * them to the main thread through the file's thread message queue. Exits on
 * EOF/error after propagating the error code to the receiving side. */
3500 static void *input_thread(void *arg)
3503 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3508 ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer: back off and retry the read. */
3510 if (ret == AVERROR(EAGAIN)) {
/* Read error or EOF: forward the status to the receiver and stop. */
3515 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3518 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* A non-blocking send failed because the queue is full: warn and retry
 * (presumably as a blocking send — the retry setup line is not visible
 * here; TODO confirm against the full source). */
3519 if (flags && ret == AVERROR(EAGAIN)) {
3521 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3522 av_log(f->ctx, AV_LOG_WARNING,
3523 "Thread message queue blocking; consider raising the "
3524 "thread_queue_size option (current value: %d)\n",
3525 f->thread_queue_size);
/* Any send failure other than a normal EOF is reported; either way the
 * packet is released and the error propagated to the receiver. */
3528 if (ret != AVERROR_EOF)
3529 av_log(f->ctx, AV_LOG_ERROR,
3530 "Unable to send packet to main thread: %s\n",
3532 av_packet_unref(&pkt);
3533 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap all input reader threads: signal EOF to the sending side,
 * drain any queued packets so the sender can finish, join the thread and
 * free the message queue. Safe to call when no threads were started. */
3541 static void free_input_threads(void)
3545 for (i = 0; i < nb_input_files; i++) {
3546 InputFile *f = input_files[i];
3549 if (!f || !f->in_thread_queue)
3551 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so a blocked sender can make progress and exit. */
3552 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3553 av_packet_unref(&pkt);
3555 pthread_join(f->thread, NULL);
3557 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file. With a single input, packets are
 * read inline in the main loop and no threads are created.
 * Returns 0 on success or a negative AVERROR code. */
3561 static int init_input_threads(void)
3565 if (nb_input_files == 1)
3568 for (i = 0; i < nb_input_files; i++) {
3569 InputFile *f = input_files[i];
/* Use non-blocking queue receives for non-seekable (e.g. live) inputs,
 * and for any demuxer other than lavfi when there is no I/O context, so
 * one stalled input cannot block the others. */
3571 if (f->ctx->pb ? !f->ctx->pb->seekable :
3572 strcmp(f->ctx->iformat->name, "lavfi"))
3573 f->non_blocking = 1;
3574 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3575 f->thread_queue_size, sizeof(AVPacket));
3579 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3580 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3581 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create() returns a positive errno value, not -1/errno. */
3582 return AVERROR(ret);
3588 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3590 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3592 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file `f`, honouring -re rate limiting.
 * Returns AVERROR(EAGAIN) when reading now would run ahead of realtime,
 * otherwise the result of the underlying read. */
3596 static int get_input_packet(InputFile *f, AVPacket *pkt)
3600 for (i = 0; i < f->nb_streams; i++) {
3601 InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream position (dts rescaled to microseconds) with the
 * elapsed wall clock since the stream started; if we are ahead of
 * realtime, tell the caller to try again later. */
3602 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3603 int64_t now = av_gettime_relative() - ist->start;
3605 return AVERROR(EAGAIN);
/* Multiple inputs are serviced by per-file reader threads; a single
 * input is demuxed inline. */
3610 if (nb_input_files > 1)
3611 return get_input_packet_mt(f, pkt);
3613 return av_read_frame(f->ctx, pkt);
3616 static int got_eagain(void)
3619 for (i = 0; i < nb_output_streams; i++)
3620 if (output_streams[i]->unavailable)
3625 static void reset_eagain(void)
3628 for (i = 0; i < nb_input_files; i++)
3629 input_files[i]->eagain = 0;
3630 for (i = 0; i < nb_output_streams; i++)
3631 output_streams[i]->unavailable = 0;
3634 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3635 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3636 AVRational time_base)
3642 return tmp_time_base;
3645 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3648 return tmp_time_base;
/* Rewind input file `ifile` to its start for -stream_loop: seek back, flush
 * the active decoders, and update the file's accumulated duration so the
 * next iteration's timestamps can be offset past the previous one.
 * Returns 0 on success or a negative AVERROR code from the seek. */
3654 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3657 AVCodecContext *avctx;
3658 int i, ret, has_audio = 0;
3659 int64_t duration = 0;
3661 ret = av_seek_frame(is, -1, is->start_time, 0);
3665 for (i = 0; i < ifile->nb_streams; i++) {
3666 ist = input_streams[ifile->ist_index + i];
3667 avctx = ist->dec_ctx;
/* Flush each active decoder so no state leaks into the next loop pass. */
3670 if (ist->decoding_needed) {
3671 process_input_packet(ist, NULL, 1);
3672 avcodec_flush_buffers(avctx);
3675 /* duration is the length of the last frame in a stream
3676 * when audio stream is present we don't care about
3677 * last video frame length because it's not defined exactly */
3678 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3682 for (i = 0; i < ifile->nb_streams; i++) {
3683 ist = input_streams[ifile->ist_index + i];
3684 avctx = ist->dec_ctx;
/* Audio: the last frame lasts nb_samples / sample_rate. */
3687 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3688 AVRational sample_rate = {1, avctx->sample_rate};
3690 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: estimate the last frame length from the forced frame rate (-r),
 * then the average frame rate, else fall back to one time-base unit. */
3694 if (ist->framerate.num) {
3695 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3696 } else if (ist->st->avg_frame_rate.num) {
3697 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3698 } else duration = 1;
3700 if (!ifile->duration)
3701 ifile->time_base = ist->st->time_base;
3702 /* the total duration of the stream, max_pts - min_pts is
3703 * the duration of the stream without the last frame */
3704 duration += ist->max_pts - ist->min_pts;
/* Keep the longest per-stream duration as the file's loop offset. */
3705 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* A positive loop count is finite; decrement it per completed pass. */
3709 if (ifile->loop > 0)
3717 * - 0 -- one packet was read and processed
3718 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3719 * this function should be called again
3720 * - AVERROR_EOF -- this function should not be called again
3722 static int process_input(int file_index)
3724 InputFile *ifile = input_files[file_index];
3725 AVFormatContext *is;
3732 ret = get_input_packet(ifile, &pkt);
3734 if (ret == AVERROR(EAGAIN)) {
/* EOF (or error) with -stream_loop active: rewind the file and try again. */
3738 if (ret < 0 && ifile->loop) {
3739 if ((ret = seek_to_start(ifile, is)) < 0)
3741 ret = get_input_packet(ifile, &pkt);
3744 if (ret != AVERROR_EOF) {
3745 print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file, and finish outputs fed
 * directly from it (streamcopy / subtitle) that don't go through lavfi. */
3750 for (i = 0; i < ifile->nb_streams; i++) {
3751 ist = input_streams[ifile->ist_index + i];
3752 if (ist->decoding_needed) {
3753 ret = process_input_packet(ist, NULL, 0);
3758 /* mark all outputs that don't go through lavfi as finished */
3759 for (j = 0; j < nb_output_streams; j++) {
3760 OutputStream *ost = output_streams[j];
3762 if (ost->source_index == ifile->ist_index + i &&
3763 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3764 finish_output_stream(ost);
/* EOF is surfaced as EAGAIN (with eof_reached set) so the scheduler moves
 * on to other input files instead of aborting. */
3768 ifile->eof_reached = 1;
3769 return AVERROR(EAGAIN);
3775 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3776 is->streams[pkt.stream_index]);
3778 /* the following test is needed in case new streams appear
3779 dynamically in stream : we ignore them */
3780 if (pkt.stream_index >= ifile->nb_streams) {
3781 report_new_stream(file_index, &pkt);
3782 goto discard_packet;
3785 ist = input_streams[ifile->ist_index + pkt.stream_index];
3787 ist->data_size += pkt.size;
3791 goto discard_packet;
3793 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3794 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3799 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3800 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3801 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3802 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3803 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3804 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3805 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3806 av_ts2str(input_files[ist->file_index]->ts_offset),
3807 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams whose pts wrap in < 64 bits. */
3810 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3811 int64_t stime, stime2;
3812 // Correcting starttime based on the enabled streams
3813 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3814 // so we instead do it here as part of discontinuity handling
3815 if ( ist->next_dts == AV_NOPTS_VALUE
3816 && ifile->ts_offset == -is->start_time
3817 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3818 int64_t new_start_time = INT64_MAX;
/* Recompute the start time from only the streams that are not discarded. */
3819 for (i=0; i<is->nb_streams; i++) {
3820 AVStream *st = is->streams[i];
3821 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3823 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3825 if (new_start_time > is->start_time) {
3826 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3827 ifile->ts_offset = -new_start_time;
3831 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3832 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3833 ist->wrap_correction_done = 1;
/* A timestamp more than half a wrap period past start is assumed to have
 * wrapped: unwrap it and keep the correction pending for later packets. */
3835 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3836 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3837 ist->wrap_correction_done = 0;
3839 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3840 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3841 ist->wrap_correction_done = 0;
3845 /* add the stream-global side data to the first packet */
3846 if (ist->nb_packets == 1) {
3847 if (ist->st->nb_side_data)
3848 av_packet_split_side_data(&pkt);
3849 for (i = 0; i < ist->st->nb_side_data; i++) {
3850 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Keep side data the packet already carries; skip the display matrix
 * when autorotate will handle rotation via a filter instead. */
3853 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3855 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3858 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3862 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset (ts_offset, in AV_TIME_BASE units). */
3866 if (pkt.dts != AV_NOPTS_VALUE)
3867 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3868 if (pkt.pts != AV_NOPTS_VALUE)
3869 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* Apply the per-stream timestamp scale (-itsscale). */
3871 if (pkt.pts != AV_NOPTS_VALUE)
3872 pkt.pts *= ist->ts_scale;
3873 if (pkt.dts != AV_NOPTS_VALUE)
3874 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity handling: on a stream's first dts in a
 * TS_DISCONT format, realign against the file's last known timestamp. */
3876 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3877 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3878 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3879 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3880 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3881 int64_t delta = pkt_dts - ifile->last_ts;
3882 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3883 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3884 ifile->ts_offset -= delta;
3885 av_log(NULL, AV_LOG_DEBUG,
3886 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3887 delta, ifile->ts_offset);
3888 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3889 if (pkt.pts != AV_NOPTS_VALUE)
3890 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping (-stream_loop), shift this pass's timestamps past the
 * accumulated duration of all previous passes. */
3894 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3895 if (pkt.pts != AV_NOPTS_VALUE) {
3896 pkt.pts += duration;
3897 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3898 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3901 if (pkt.dts != AV_NOPTS_VALUE)
3902 pkt.dts += duration;
/* Intra-stream discontinuity detection against the predicted next dts. */
3904 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3905 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3906 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3908 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3909 int64_t delta = pkt_dts - ist->next_dts;
/* TS_DISCONT formats: absorb the jump into ts_offset. */
3910 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3911 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3912 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3913 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3914 ifile->ts_offset -= delta;
3915 av_log(NULL, AV_LOG_DEBUG,
3916 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3917 delta, ifile->ts_offset);
3918 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3919 if (pkt.pts != AV_NOPTS_VALUE)
3920 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Other formats: drop timestamps that are wildly out of range. */
3923 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3924 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3925 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3926 pkt.dts = AV_NOPTS_VALUE;
3928 if (pkt.pts != AV_NOPTS_VALUE){
3929 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3930 delta = pkt_pts - ist->next_dts;
3931 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3932 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3933 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3934 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for inter-stream realignment above. */
3940 if (pkt.dts != AV_NOPTS_VALUE)
3941 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3944 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3945 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3946 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3947 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3948 av_ts2str(input_files[ist->file_index]->ts_offset),
3949 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video overlays ticking, then decode or streamcopy the packet. */
3952 sub2video_heartbeat(ist, pkt.pts);
3954 process_input_packet(ist, &pkt, 0);
3957 av_packet_unref(&pkt);
3963 * Perform a step of transcoding for the specified filter graph.
3965 * @param[in] graph filter graph to consider
3966 * @param[out] best_ist input stream where a frame would allow to continue
3967 * @return 0 for success, <0 for error
3969 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3972 int nb_requests, nb_requests_max = 0;
3973 InputFilter *ifilter;
3977 ret = avfilter_graph_request_oldest(graph->graph);
/* A frame was produced somewhere in the graph: drain the sinks. */
3979 return reap_filters(0);
/* EOF on the graph: flush the sinks and close all its output streams. */
3981 if (ret == AVERROR_EOF) {
3982 ret = reap_filters(1);
3983 for (i = 0; i < graph->nb_outputs; i++)
3984 close_output_stream(graph->outputs[i]->ost);
3987 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input filter with the most
 * failed frame requests whose file can still deliver data; its stream is
 * reported through *best_ist so the caller can demux for it. */
3990 for (i = 0; i < graph->nb_inputs; i++) {
3991 ifilter = graph->inputs[i];
3993 if (input_files[ist->file_index]->eagain ||
3994 input_files[ist->file_index]->eof_reached)
3996 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3997 if (nb_requests > nb_requests_max) {
3998 nb_requests_max = nb_requests;
/* No input can make progress right now: mark this graph's outputs
 * unavailable so choose_output() skips them until inputs recover. */
4004 for (i = 0; i < graph->nb_outputs; i++)
4005 graph->outputs[i]->ost->unavailable = 1;
4011 * Run a single step of transcoding.
4013 * @return 0 for success, <0 for error
4015 static int transcode_step(void)
4021 ost = choose_output();
/* choose_output() returned NULL: either every stream hit EAGAIN (wait and
 * retry) or all inputs are exhausted (finish cleanly). */
4028 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed stream: let the filtergraph pick the input that best
 * unblocks it. */
4033 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Directly-fed stream (streamcopy etc.): use its source input stream. */
4038 av_assert0(ost->source_index >= 0);
4039 ist = input_streams[ost->source_index];
4042 ret = process_input(ist->file_index);
4043 if (ret == AVERROR(EAGAIN)) {
4044 if (input_files[ist->file_index]->eagain)
4045 ost->unavailable = 1;
/* EOF from process_input() is not an error at this level. */
4050 return ret == AVERROR_EOF ? 0 : ret;
4052 return reap_filters(0);
4056 * The following code is the main loop of the file converter
4058 static int transcode(void)
4061 AVFormatContext *os;
4064 int64_t timer_start;
4065 int64_t total_packets_written = 0;
4067 ret = transcode_init();
4071 if (stdin_interaction) {
4072 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4075 timer_start = av_gettime_relative();
4078 if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode_step() per iteration until a termination signal,
 * all outputs are done, or a fatal error occurs. */
4082 while (!received_sigterm) {
4083 int64_t cur_time= av_gettime_relative();
4085 /* if 'q' pressed, exits */
4086 if (stdin_interaction)
4087 if (check_keyboard_interaction(cur_time) < 0)
4090 /* check if there's any stream where output is still needed */
4091 if (!need_output()) {
4092 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4096 ret = transcode_step();
/* EOF/EAGAIN from a step are normal scheduling outcomes, not errors. */
4098 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4102 av_strerror(ret, errbuf, sizeof(errbuf));
4104 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4109 /* dump report by using the output first video and audio streams */
4110 print_report(0, timer_start, cur_time);
4113 free_input_threads();
4116 /* at the end of stream, we must flush the decoder buffers */
4117 for (i = 0; i < nb_input_streams; i++) {
4118 ist = input_streams[i];
4119 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4120 process_input_packet(ist, NULL, 0);
4127 /* write the trailer if needed and close file */
4128 for (i = 0; i < nb_output_files; i++) {
4129 os = output_files[i]->ctx;
4130 if ((ret = av_write_trailer(os)) < 0) {
4131 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4137 /* dump report by using the first video and audio streams */
4138 print_report(1, timer_start, av_gettime_relative());
4140 /* close each encoder */
4141 for (i = 0; i < nb_output_streams; i++) {
4142 ost = output_streams[i];
4143 if (ost->encoding_needed) {
4144 av_freep(&ost->enc_ctx->stats_in);
4146 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing at all was written. */
4149 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4150 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4154 /* close each decoder */
4155 for (i = 0; i < nb_input_streams; i++) {
4156 ist = input_streams[i];
4157 if (ist->decoding_needed) {
4158 avcodec_close(ist->dec_ctx);
4159 if (ist->hwaccel_uninit)
4160 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (also reached on error): stop reader threads and release
 * per-output-stream allocations. */
4169 free_input_threads();
4172 if (output_streams) {
4173 for (i = 0; i < nb_output_streams; i++) {
4174 ost = output_streams[i];
4177 fclose(ost->logfile);
4178 ost->logfile = NULL;
4180 av_freep(&ost->forced_kf_pts);
4181 av_freep(&ost->apad);
4182 av_freep(&ost->disposition);
4183 av_dict_free(&ost->encoder_opts);
4184 av_dict_free(&ost->sws_dict);
4185 av_dict_free(&ost->swr_opts);
4186 av_dict_free(&ost->resample_opts);
4187 av_dict_free(&ost->bsf_args);
/* Return user CPU time consumed by this process, in microseconds.
 * Uses getrusage() on POSIX, GetProcessTimes() on Windows, and falls back to
 * wall-clock time when neither is available. */
4195 static int64_t getutime(void)
4198 struct rusage rusage;
4200 getrusage(RUSAGE_SELF, &rusage);
4201 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4202 #elif HAVE_GETPROCESSTIMES
4204 FILETIME c, e, k, u;
4205 proc = GetCurrentProcess();
4206 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100ns ticks; divide by 10 to convert to microseconds. */
4207 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4209 return av_gettime_relative();
/* Return the process peak memory usage in bytes (0 when no API available). */
4213 static int64_t getmaxrss(void)
4215 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4216 struct rusage rusage;
4217 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; convert to bytes. */
4218 return (int64_t)rusage.ru_maxrss * 1024;
4219 #elif HAVE_GETPROCESSMEMORYINFO
4221 PROCESS_MEMORY_COUNTERS memcounters;
4222 proc = GetCurrentProcess();
4223 memcounters.cb = sizeof(memcounters);
4224 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4225 return memcounters.PeakPagefileUsage;
/*
 * No-op av_log callback. Installed when running as a daemon (-d) so that all
 * library logging is silently discarded.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
/* Program entry point: set up logging and cleanup, register all components,
 * parse the command line, run transcode(), and report benchmarks/statistics
 * before exiting through exit_program(). */
4235 int main(int argc, char **argv)
/* Install the cleanup handler invoked by exit_program(). */
4240 register_exit(ffmpeg_cleanup);
4242 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4244 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4245 parse_loglevel(argc, argv, options);
/* -d as the first argument: run as daemon and silence av_log output. */
4247 if(argc>1 && !strcmp(argv[1], "-d")){
4249 av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters and initialize the network layer. */
4254 avcodec_register_all();
4256 avdevice_register_all();
4258 avfilter_register_all();
4260 avformat_network_init();
4262 show_banner(argc, argv, options);
4266 /* parse options and open all input/output files */
4267 ret = ffmpeg_parse_options(argc, argv);
4271 if (nb_output_files <= 0 && nb_input_files == 0) {
4273 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4277 /* file converter / grab */
4278 if (nb_output_files <= 0) {
4279 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4283 // if (nb_input_files == 0) {
4284 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Run the converter, timing user CPU for the -benchmark report. */
4288 current_time = ti = getutime();
4289 if (transcode() < 0)
4291 ti = getutime() - ti;
4293 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4295 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4296 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: exit with failure when the decode-error ratio is exceeded. */
4297 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals termination by user interrupt (Ctrl-C etc.). */
4300 exit_program(received_nb_signals ? 255 : main_return_code);
4301 return main_return_code;