2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* NOTE(review): this file is a truncated dump of ffmpeg.c — each line carries its
   original line number and interior lines are missing; kept byte-identical. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* vstats log file; opened lazily (see do_video_stats) */
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
/* forward declarations used before their definitions */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* global duplicate/drop frame counters */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* scratch buffer for encoded subtitles (allocated in do_subtitle_out) */
135 static uint8_t *subtitle_out;
/* non-static: these stream/file tables are shared with other translation units */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas frame for the sub2video hack.  Width/height fall
   back to the configured sub2video size when the decoder reports none.
   NOTE(review): truncated fragment — braces, `int ret;` and return paths are
   missing (original numbering skips 167-168, 170, 176, 178+). */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
/* drop any previous buffer before (re)allocating */
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* clear plane 0 so the canvas starts fully transparent/black */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas, expanding
   palette indices (r->data[1] holds the palette) to 32-bit pixels.
   NOTE(review): truncated fragment — parameter list tail, early returns and
   inner loop lines are missing from the dump. */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
/* only bitmap subtitles can be rendered; others are skipped with a warning */
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* bounds check: rectangle must fit inside the w x h canvas */
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel) */
199 dst += r->y * dst_linesize + r->x * 4;
201 pal = (uint32_t *)r->data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->linesize[0];
/* Push the current sub2video canvas (with the given pts) into every filter
   input fed by this stream, keeping our own reference to the frame.
   NOTE(review): truncated fragment — braces and `int i;` are missing. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
/* record the pts so heartbeats know when the canvas was last sent */
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render a new AVSubtitle onto the blank canvas and push it downstream.
   With sub == NULL (heartbeat/flush path, per callers below) the visible lines
   reuse ist->sub2video.end_pts as the pts.
   NOTE(review): truncated fragment — the sub==NULL branch, num_rects=0 case and
   several declarations are missing from the dump. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* display times are in ms relative to sub->pts (AV_TIME_BASE units) */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
/* remember when this subtitle stops being displayed */
256 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video canvas to sibling streams of the same input
   file so video filters waiting on a subtitle input do not stall.
   NOTE(review): truncated fragment — declarations, `continue` statements and a
   condition guarding sub2video_push_ref are missing from the dump. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* refresh the canvas if the current subtitle expired or was never drawn */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream: push a final canvas update if one is pending, then signal EOF
   (NULL frame) to every connected buffer source.
   NOTE(review): truncated fragment — braces and `int i;` are missing. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved at
   init.  NOTE(review): truncated fragment — the restore_tty guard and braces
   are missing from the dump. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit state shared between signal handlers and the main loop.
   NOTE(review): `volatile` (not atomics) matches the original FFmpeg code;
   kept byte-identical. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: records the signal and, after more than three signals,
   writes a message with write(2) (async-signal-safe) and hard-exits.
   NOTE(review): truncated fragment — the return-type line, braces and the
   _exit/abort call inside the >3 branch are missing from the dump. */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
/* Windows console control handler: maps console events onto the POSIX-style
   sigterm_handler.  For close/logoff/shutdown it stalls until the main thread
   marks ffmpeg_exited, because returning terminates the process.
   NOTE(review): truncated fragment — switch head, returns, Sleep() in the wait
   loop and the closing #endif are missing from the dump. */
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): fragment with its defining line missing — presumably the body
   of term_init() (terminal raw-mode setup + signal handler installation);
   confirm against the full source.  Kept byte-identical. */
/* put the controlling tty into a raw-ish mode, saving the old state first */
371 if (tcgetattr (0, &tty) == 0) {
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
/* install termination handlers */
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* NOTE(review): truncated fragment — fd_set/timeval setup, the read() on
   success, and the Windows console-read path are missing from the dump. */
401 static int read_key(void)
/* poll stdin (fd 0) without blocking */
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so this distinguishes console vs pipe */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once more signals have been
   received than the init phase allows (1 during init, any after init done). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
/* callback struct handed to libavformat contexts */
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: frees filtergraphs, output files/streams, input
   files/streams and misc buffers, then logs the exit reason.
   NOTE(review): truncated fragment — loop/if braces, several guards (e.g. the
   vstats_file NULL check visibly missing before fclose) and the tail of the
   function are absent from the dump. */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* free all filtergraphs and their per-input/per-output descriptors */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* close output files (closing the AVIO only for formats that own a file) */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* free per-output-stream state, including the bitstream filter chain */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
533 avcodec_parameters_free(&ost->ref_par);
535 av_freep(&output_streams[i]);
538 free_input_threads();
540 for (i = 0; i < nb_input_files; i++) {
541 avformat_close_input(&input_files[i]->ctx);
542 av_freep(&input_files[i]);
544 for (i = 0; i < nb_input_streams; i++) {
545 InputStream *ist = input_streams[i];
547 av_frame_free(&ist->decoded_frame);
548 av_frame_free(&ist->filter_frame);
549 av_dict_free(&ist->decoder_opts);
550 avsubtitle_free(&ist->prev_sub.subtitle);
551 av_frame_free(&ist->sub2video.frame);
552 av_freep(&ist->filters);
553 av_freep(&ist->hwaccel_device);
555 avcodec_free_context(&ist->dec_ctx);
557 av_freep(&input_streams[i]);
/* flush/close the vstats log; fclose failure can mean lost stats */
561 if (fclose(vstats_file))
562 av_log(NULL, AV_LOG_ERROR,
563 "Error closing vstats file, loss of information possible: %s\n",
564 av_err2str(AVERROR(errno)));
566 av_freep(&vstats_filename);
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that appears in dictionary b
   (case-sensitive match).  Used to drop already-consumed options.
   NOTE(review): truncated fragment — braces are missing from the dump. */
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fatal-log if any option remains unconsumed in m (i.e. was not recognized).
   NOTE(review): truncated fragment — braces and the exit call after the fatal
   log are missing from the dump. */
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* NOTE(review): only the signature survived in this dump; the body (presumably
   an exit after reporting an experimental codec) is missing — confirm against
   the full source. */
605 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time consumed since the previous call,
   labelled by the printf-style fmt/args; fmt == NULL only resets the clock.
   NOTE(review): truncated fragment — buf/va declarations, va_start/va_end and
   the fmt NULL check are missing from the dump. */
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags for ost itself,
   `others` flags for all remaining streams.
   NOTE(review): truncated fragment — braces and `int i;` are missing. */
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
/* Mux one packet on behalf of ost: applies bitstream filters, frame-count
   limits, side-data extraction, DTS monotonicity fixups, then hands the packet
   to av_interleaved_write_frame().
   NOTE(review): heavily truncated fragment — many braces, else branches,
   returns and debug-flag guards are missing between the numbered lines. */
636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVStream *st = ost->st;
639 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
640 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* -vsync drop / negative async: strip timestamps entirely */
643 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
644 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
645 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
648 * Audio encoders may split the packets -- #frames in != #packets out.
649 * But there is no reordering, so we can limit the number of output packets
650 * by simply dropping them here.
651 * Counting encoded video frames needs to be done separately because of
652 * reordering, see do_video_out()
654 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
655 if (ost->frame_number >= ost->max_frames) {
656 av_packet_unref(pkt);
/* pull encoder quality/error stats out of packet side data */
661 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
663 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
665 ost->quality = sd ? AV_RL32(sd) : -1;
666 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
668 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
670 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force packet duration from the configured frame rate */
675 if (ost->frame_rate.num && ost->is_cfr) {
676 if (pkt->duration > 0)
677 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n")
678 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
684 av_packet_split_side_data(pkt);
686 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
687 print_error("", ret);
691 if (pkt->size == 0 && pkt->side_data_elems == 0)
/* propagate parser-produced extradata into codecpar if not set yet */
693 if (!st->codecpar->extradata_size && avctx->extradata_size) {
694 st->codecpar->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
695 if (!st->codecpar->extradata) {
696 av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
699 st->codecpar->extradata_size = avctx->extradata_size;
700 memcpy(st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
/* timestamp sanitization for formats that require timestamps */
703 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
704 if (pkt->dts != AV_NOPTS_VALUE &&
705 pkt->pts != AV_NOPTS_VALUE &&
706 pkt->dts > pkt->pts) {
707 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
709 ost->file_index, ost->st->index);
/* replace dts by the median of pts/dts/last_mux_dts+1 (sum minus min minus max) */
711 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
712 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
713 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream */
715 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
716 pkt->dts != AV_NOPTS_VALUE &&
717 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
718 ost->last_mux_dts != AV_NOPTS_VALUE) {
719 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
720 if (pkt->dts < max) {
721 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
722 av_log(s, loglevel, "Non-monotonous DTS in output stream "
723 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
724 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
726 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
729 av_log(s, loglevel, "changing to %"PRId64". This may result "
730 "in incorrect timestamps in the output file.\n",
732 if (pkt->pts >= pkt->dts)
733 pkt->pts = FFMAX(pkt->pts, max);
738 ost->last_mux_dts = pkt->dts;
740 ost->data_size += pkt->size;
741 ost->packets_written++;
743 pkt->stream_index = ost->index;
746 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
747 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
748 av_get_media_type_string(ost->enc_ctx->codec_type),
749 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
750 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
755 ret = av_interleaved_write_frame(s, pkt);
/* on mux failure: record failure and finish all streams */
757 print_error("av_interleaved_write_frame()", ret);
758 main_return_code = 1;
759 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
761 av_packet_unref(pkt);
/* Mark the encoder side of ost finished and clamp the owning file's recording
   time to the stream's current end position.
   NOTE(review): truncated fragment — braces and the condition guarding the
   recording_time update are missing from the dump. */
764 static void close_output_stream(OutputStream *ost)
766 OutputFile *of = output_files[ost->file_index];
768 ost->finished |= ENCODER_FINISHED;
770 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
771 of->recording_time = FFMIN(of->recording_time, end);
/* Return whether ost may still emit frames under the file's -t limit; closes
   the stream once its position reaches recording_time.
   NOTE(review): truncated fragment — braces and the return statements
   (0 after closing / 1 otherwise) are missing from the dump. */
775 static int check_recording_time(OutputStream *ost)
777 OutputFile *of = output_files[ost->file_index];
779 if (of->recording_time != INT64_MAX &&
780 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
781 AV_TIME_BASE_Q) >= 0) {
782 close_output_stream(ost);
/* Encode one audio frame and, if a packet is produced, rescale its timestamps
   and mux it via write_frame().
   NOTE(review): truncated fragment — parameter tail, pkt declaration, the
   got_packet handling and debug-flag guards are missing from the dump. */
788 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
791 AVCodecContext *enc = ost->enc_ctx;
795 av_init_packet(&pkt);
799 if (!check_recording_time(ost))
/* derive pts from the running sample counter when input gives none */
802 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
803 frame->pts = ost->sync_opts;
804 ost->sync_opts = frame->pts + frame->nb_samples;
805 ost->samples_encoded += frame->nb_samples;
806 ost->frames_encoded++;
808 av_assert0(pkt.size || !pkt.data);
809 update_benchmark(NULL);
811 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
812 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
813 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
814 enc->time_base.num, enc->time_base.den);
817 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
818 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
821 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* convert from encoder time base to muxer (stream) time base */
824 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
827 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
828 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
829 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
830 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
833 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles need
   two packets (draw + clear), hence the nb loop visible below.
   NOTE(review): truncated fragment — parameter tail, enc/pkt declarations,
   nb assignment for the non-DVB case and several braces are missing. */
837 static void do_subtitle_out(AVFormatContext *s,
842 int subtitle_out_max_size = 1024 * 1024;
843 int subtitle_out_size, nb, i;
848 if (sub->pts == AV_NOPTS_VALUE) {
849 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocate the shared 1 MiB output buffer */
858 subtitle_out = av_malloc(subtitle_out_max_size);
860 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
865 /* Note: DVB subtitle need one packet to draw them and one other
866 packet to clear them */
867 /* XXX: signal it in the codec context ? */
868 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
873 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
875 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
876 pts -= output_files[ost->file_index]->start_time;
877 for (i = 0; i < nb; i++) {
878 unsigned save_num_rects = sub->num_rects;
880 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
881 if (!check_recording_time(ost))
885 // start_display_time is required to be 0
886 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
887 sub->end_display_time -= sub->start_display_time;
888 sub->start_display_time = 0;
892 ost->frames_encoded++;
894 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
895 subtitle_out_max_size, sub);
/* the encoder may have clobbered num_rects (clear packet); restore it */
897 sub->num_rects = save_num_rects;
898 if (subtitle_out_size < 0) {
899 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
903 av_init_packet(&pkt);
904 pkt.data = subtitle_out;
905 pkt.size = subtitle_out_size;
906 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
907 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
908 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
909 /* XXX: the pts correction is handled here. Maybe handling
910 it in the codec would be better */
/* 90 = 90 kHz MPEG-TS clock ticks per millisecond of display time */
912 pkt.pts += 90 * sub->start_display_time;
914 pkt.pts += 90 * sub->end_display_time;
917 write_frame(s, &pkt, ost);
/* Video output path: decides, per -vsync mode, how many times to emit (or
   drop/duplicate) the incoming picture, handles forced keyframes, encodes, and
   muxes.  Keeps a reference to the last frame for duplication.
   NOTE(review): heavily truncated fragment — parameter list, pkt/duration/
   sync_ipts declarations, most case labels, else branches and closing braces
   are missing between the numbered lines. */
921 static void do_video_out(AVFormatContext *s,
923 AVFrame *next_picture,
926 int ret, format_video_sync;
928 AVCodecContext *enc = ost->enc_ctx;
929 AVCodecParameters *mux_par = ost->st->codecpar;
930 int nb_frames, nb0_frames, i;
931 double delta, delta0;
934 InputStream *ist = NULL;
935 AVFilterContext *filter = ost->filter->filter;
937 if (ost->source_index >= 0)
938 ist = input_streams[ost->source_index];
/* frame duration (in encoder ticks) from the filter output frame rate */
940 if (filter->inputs[0]->frame_rate.num > 0 &&
941 filter->inputs[0]->frame_rate.den > 0)
942 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
944 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
945 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
947 if (!ost->filters_script &&
951 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
952 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path: predict dup count from the last three observed values */
957 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
958 ost->last_nb0_frames[1],
959 ost->last_nb0_frames[2]);
961 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
962 delta = delta0 + duration;
964 /* by default, we output a single frame */
965 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
968 format_video_sync = video_sync_method;
/* resolve -vsync auto based on the output format's capabilities */
969 if (format_video_sync == VSYNC_AUTO) {
970 if(!strcmp(s->oformat->name, "avi")) {
971 format_video_sync = VSYNC_VFR;
973 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
975 && format_video_sync == VSYNC_CFR
976 && input_files[ist->file_index]->ctx->nb_streams == 1
977 && input_files[ist->file_index]->input_ts_offset == 0) {
978 format_video_sync = VSYNC_VSCFR;
980 if (format_video_sync == VSYNC_CFR && copy_ts) {
981 format_video_sync = VSYNC_VSCFR;
984 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
988 format_video_sync != VSYNC_PASSTHROUGH &&
989 format_video_sync != VSYNC_DROP) {
991 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
993 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
994 sync_ipts = ost->sync_opts;
/* decide nb_frames / nb0_frames per sync mode */
999 switch (format_video_sync) {
1001 if (ost->frame_number == 0 && delta0 >= 0.5) {
1002 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1005 ost->sync_opts = lrint(sync_ipts);
1008 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1009 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1011 } else if (delta < -1.1)
1013 else if (delta > 1.1) {
1014 nb_frames = lrintf(delta);
1016 nb0_frames = lrintf(delta0 - 0.6);
1022 else if (delta > 0.6)
1023 ost->sync_opts = lrint(sync_ipts);
1026 case VSYNC_PASSTHROUGH:
1027 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit */
1034 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1035 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the dup-count history window */
1037 memmove(ost->last_nb0_frames + 1,
1038 ost->last_nb0_frames,
1039 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1040 ost->last_nb0_frames[0] = nb0_frames;
1042 if (nb0_frames == 0 && ost->last_dropped) {
1044 av_log(NULL, AV_LOG_VERBOSE,
1045 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1046 ost->frame_number, ost->st->index, ost->last_frame->pts);
1048 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1049 if (nb_frames > dts_error_threshold * 30) {
1050 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1054 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1055 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1057 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1059 /* duplicates frame if needed */
1060 for (i = 0; i < nb_frames; i++) {
1061 AVFrame *in_picture;
1062 av_init_packet(&pkt);
/* the first nb0_frames iterations re-emit the previous frame */
1066 if (i < nb0_frames && ost->last_frame) {
1067 in_picture = ost->last_frame;
1069 in_picture = next_picture;
1074 in_picture->pts = ost->sync_opts;
1077 if (!check_recording_time(ost))
1079 if (ost->frame_number >= ost->max_frames)
1083 #if FF_API_LAVF_FMT_RAWPICTURE
1084 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1085 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1086 /* raw pictures are written as AVPicture structure to
1087 avoid any copies. We support temporarily the older
1089 if (in_picture->interlaced_frame)
1090 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1092 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1093 pkt.data = (uint8_t *)in_picture;
1094 pkt.size = sizeof(AVPicture);
1095 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1096 pkt.flags |= AV_PKT_FLAG_KEY;
1098 write_frame(s, &pkt, ost);
1102 int got_packet, forced_keyframe = 0;
1105 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1106 ost->top_field_first >= 0)
1107 in_picture->top_field_first = !!ost->top_field_first;
1109 if (in_picture->interlaced_frame) {
1110 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1111 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1113 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1115 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1117 in_picture->quality = enc->global_quality;
1118 in_picture->pict_type = 0;
1120 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1121 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* forced keyframes: explicit pts list, expression, or "source" key frames */
1122 if (ost->forced_kf_index < ost->forced_kf_count &&
1123 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1124 ost->forced_kf_index++;
1125 forced_keyframe = 1;
1126 } else if (ost->forced_keyframes_pexpr) {
1128 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1129 res = av_expr_eval(ost->forced_keyframes_pexpr,
1130 ost->forced_keyframes_expr_const_values, NULL);
1131 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1132 ost->forced_keyframes_expr_const_values[FKF_N],
1133 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1135 ost->forced_keyframes_expr_const_values[FKF_T],
1136 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1139 forced_keyframe = 1;
1140 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1141 ost->forced_keyframes_expr_const_values[FKF_N];
1142 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1143 ost->forced_keyframes_expr_const_values[FKF_T];
1144 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1147 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1148 } else if ( ost->forced_keyframes
1149 && !strncmp(ost->forced_keyframes, "source", 6)
1150 && in_picture->key_frame==1) {
1151 forced_keyframe = 1;
1154 if (forced_keyframe) {
1155 in_picture->pict_type = AV_PICTURE_TYPE_I;
1156 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1159 update_benchmark(NULL);
1161 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1162 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1163 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1164 enc->time_base.num, enc->time_base.den);
1167 ost->frames_encoded++;
1169 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1170 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1172 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1178 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1179 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1180 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1181 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1184 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1185 pkt.pts = ost->sync_opts;
1187 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1190 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1191 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1192 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1193 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1196 frame_size = pkt.size;
1197 write_frame(s, &pkt, ost);
1199 /* if two pass, output log */
1200 if (ost->logfile && enc->stats_out) {
1201 fprintf(ost->logfile, "%s", enc->stats_out);
1207 * For video, number of frames in == number of packets out.
1208 * But there may be reordering, so we can't throw away frames on encoder
1209 * flush, we need to limit them here, before they go into encoder.
1211 ost->frame_number++;
1213 if (vstats_filename && frame_size)
1214 do_video_stats(ost, frame_size);
/* keep a reference to this picture for possible duplication next call */
1217 if (!ost->last_frame)
1218 ost->last_frame = av_frame_alloc();
1219 av_frame_unref(ost->last_frame);
1220 if (next_picture && ost->last_frame)
1221 av_frame_ref(ost->last_frame, next_picture);
1223 av_frame_free(&ost->last_frame);
1226 static double psnr(double d)
1228 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quality, PSNR, size, bitrates,
   picture type) to the -vstats file, opening it on first call.
   NOTE(review): truncated fragment — declarations (frame_number), the fopen
   failure handling, enc assignment and several braces are missing. */
1231 static void do_video_stats(OutputStream *ost, int frame_size)
1233 AVCodecContext *enc;
1235 double ti1, bitrate, avg_bitrate;
1237 /* this is executed just the first time do_video_stats is called */
1239 vstats_file = fopen(vstats_filename, "w");
1247 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1248 frame_number = ost->st->nb_frames;
1249 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1250 ost->quality / (float)FF_QP2LAMBDA);
1252 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1253 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1255 fprintf(vstats_file,"f_size= %6d ", frame_size);
1256 /* compute pts value */
1257 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1261 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1262 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1263 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1264 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1265 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark ost — and every other stream of the same output file — as fully
   finished (encoder and muxer).
   NOTE(review): truncated fragment — braces, `int i;` and the condition
   guarding the sibling-stream loop are missing from the dump. */
1269 static void finish_output_stream(OutputStream *ost)
1271 OutputFile *of = output_files[ost->file_index];
1274 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1277 for (i = 0; i < of->ctx->nb_streams; i++)
1278 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1283 * Get and encode new output from any of the filtergraphs, without causing
1286 * @return 0 for success, <0 for severe errors
1288 static int reap_filters(int flush)
1290 AVFrame *filtered_frame = NULL;
1293 /* Reap all buffers present in the buffer sinks */
1294 for (i = 0; i < nb_output_streams; i++) {
1295 OutputStream *ost = output_streams[i];
1296 OutputFile *of = output_files[ost->file_index];
1297 AVFilterContext *filter;
1298 AVCodecContext *enc = ost->enc_ctx;
1303 filter = ost->filter->filter;
/* lazily allocate the per-stream frame used to pull from the sink */
1305 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1306 return AVERROR(ENOMEM);
1308 filtered_frame = ost->filtered_frame;
1311 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued, do not drive the graph */
1312 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1313 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN (nothing queued) and EOF are expected; anything else is logged */
1315 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1316 av_log(NULL, AV_LOG_WARNING,
1317 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1318 } else if (flush && ret == AVERROR_EOF) {
/* on flush, push a NULL frame so the video path can emit trailing frames */
1319 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1320 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1324 if (ost->finished) {
1325 av_frame_unref(filtered_frame);
1328 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1329 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1330 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra precision */
1331 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1333 tb.den <<= extra_bits;
1335 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1336 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1337 float_pts /= 1 << extra_bits;
1338 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1339 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* integer pts for the encoder, rescaled into the encoder timebase */
1341 filtered_frame->pts =
1342 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1343 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1345 //if (ost->source_index >= 0)
1346 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1348 switch (filter->inputs[0]->type) {
1349 case AVMEDIA_TYPE_VIDEO:
/* keep the filter-produced SAR unless the user forced an aspect ratio */
1350 if (!ost->frame_aspect_ratio.num)
1351 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1354 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1355 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1357 enc->time_base.num, enc->time_base.den);
1360 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1362 case AVMEDIA_TYPE_AUDIO:
/* a non-normalized audio graph may change channel count mid-stream;
 * only encoders with AV_CODEC_CAP_PARAM_CHANGE can cope with that */
1363 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1364 enc->channels != av_frame_get_channels(filtered_frame)) {
1365 av_log(NULL, AV_LOG_ERROR,
1366 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1369 do_audio_out(of->ctx, ost, filtered_frame);
1372 // TODO support subtitle filters
1376 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then detailed per-input/per-output stream packet
 * and frame counts at VERBOSE level. total_size is the muxed output size. */
1383 static void print_final_stats(int64_t total_size)
1385 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1386 uint64_t subtitle_size = 0;
1387 uint64_t data_size = 0;
/* negative percent means "unknown" and prints as such below */
1388 float percent = -1.0;
1392 for (i = 0; i < nb_output_streams; i++) {
1393 OutputStream *ost = output_streams[i];
1394 switch (ost->enc_ctx->codec_type) {
1395 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1396 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1397 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1398 default: other_size += ost->data_size; break;
1400 extra_size += ost->enc_ctx->extradata_size;
1401 data_size += ost->data_size;
/* NOTE(review): this mixes the deprecated CODEC_FLAG_PASS2 with the
 * current AV_CODEC_FLAG_PASS1 name -- the values are presumably equal,
 * but AV_CODEC_FLAG_PASS2 would be consistent; verify before changing */
1402 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1403 != AV_CODEC_FLAG_PASS1)
/* overhead = bytes the container added on top of the raw coded streams */
1407 if (data_size && total_size>0 && total_size >= data_size)
1408 percent = 100.0 * (total_size - data_size) / data_size;
1410 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1411 video_size / 1024.0,
1412 audio_size / 1024.0,
1413 subtitle_size / 1024.0,
1414 other_size / 1024.0,
1415 extra_size / 1024.0);
1417 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1419 av_log(NULL, AV_LOG_INFO, "unknown");
1420 av_log(NULL, AV_LOG_INFO, "\n");
1422 /* print verbose per-stream stats */
1423 for (i = 0; i < nb_input_files; i++) {
1424 InputFile *f = input_files[i];
/* local total_size shadows the parameter intentionally: per-file totals */
1425 uint64_t total_packets = 0, total_size = 0;
1427 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1428 i, f->ctx->filename);
1430 for (j = 0; j < f->nb_streams; j++) {
1431 InputStream *ist = input_streams[f->ist_index + j];
1432 enum AVMediaType type = ist->dec_ctx->codec_type;
1434 total_size += ist->data_size;
1435 total_packets += ist->nb_packets;
1437 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1438 i, j, media_type_string(type));
1439 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1440 ist->nb_packets, ist->data_size);
1442 if (ist->decoding_needed) {
1443 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1444 ist->frames_decoded);
1445 if (type == AVMEDIA_TYPE_AUDIO)
1446 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1447 av_log(NULL, AV_LOG_VERBOSE, "; ");
1450 av_log(NULL, AV_LOG_VERBOSE, "\n");
1453 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1454 total_packets, total_size);
1457 for (i = 0; i < nb_output_files; i++) {
1458 OutputFile *of = output_files[i];
1459 uint64_t total_packets = 0, total_size = 0;
1461 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1462 i, of->ctx->filename);
1464 for (j = 0; j < of->ctx->nb_streams; j++) {
1465 OutputStream *ost = output_streams[of->ost_index + j];
1466 enum AVMediaType type = ost->enc_ctx->codec_type;
1468 total_size += ost->data_size;
1469 total_packets += ost->packets_written;
1471 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1472 i, j, media_type_string(type));
1473 if (ost->encoding_needed) {
1474 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1475 ost->frames_encoded);
1476 if (type == AVMEDIA_TYPE_AUDIO)
1477 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1478 av_log(NULL, AV_LOG_VERBOSE, "; ");
1481 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1482 ost->packets_written, ost->data_size);
1484 av_log(NULL, AV_LOG_VERBOSE, "\n");
1487 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1488 total_packets, total_size);
/* warn loudly if absolutely nothing was encoded or copied */
1490 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1491 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1493 av_log(NULL, AV_LOG_WARNING, "\n");
1495 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Build and emit the periodic status line (frame/fps/q/size/time/bitrate/
 * speed) to stderr or the log, and a machine-readable key=value report to
 * progress_avio when -progress is in use. Rate-limited to ~2 updates/s
 * unless this is the final report, in which case final stats are printed. */
1500 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1503 AVBPrint buf_script;
1505 AVFormatContext *oc;
1507 AVCodecContext *enc;
1508 int frame_number, vid, i;
/* min over all streams of the last muxed end pts, in AV_TIME_BASE units */
1511 int64_t pts = INT64_MIN + 1;
/* static state: persists across calls for rate limiting and QP histogram */
1512 static int64_t last_time = -1;
1513 static int64_t qp_histogram[52];
1514 int hours, mins, secs, us;
1518 if (!print_stats && !is_last_report && !progress_avio)
1521 if (!is_last_report) {
1522 if (last_time == -1) {
1523 last_time = cur_time;
/* throttle: skip reports closer than 0.5s together */
1526 if ((cur_time - last_time) < 500000)
1528 last_time = cur_time;
1531 t = (cur_time-timer_start) / 1000000.0;
1534 oc = output_files[0]->ctx;
1536 total_size = avio_size(oc->pb);
1537 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1538 total_size = avio_tell(oc->pb);
1542 av_bprint_init(&buf_script, 0, 1);
1543 for (i = 0; i < nb_output_streams; i++) {
1545 ost = output_streams[i];
1547 if (!ost->stream_copy)
1548 q = ost->quality / (float) FF_QP2LAMBDA;
/* vid tracks whether a video stream was already reported; extra video
 * streams only contribute their quantizer */
1550 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1551 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1552 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1553 ost->file_index, ost->index, q);
1555 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1558 frame_number = ost->frame_number;
1559 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal for small rates, 0 otherwise */
1560 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1561 frame_number, fps < 9.95, fps, q);
1562 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1563 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1564 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1565 ost->file_index, ost->index, q);
1567 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate a per-QP histogram, printed as one hex digit per bucket */
1571 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1573 for (j = 0; j < 32; j++)
1574 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1577 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1579 double error, error_sum = 0;
1580 double scale, scale_sum = 0;
1582 char type[3] = { 'Y','U','V' };
1583 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1584 for (j = 0; j < 3; j++) {
/* final report: cumulative error over all frames; else current frame */
1585 if (is_last_report) {
1586 error = enc->error[j];
1587 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1589 error = ost->error[j];
1590 scale = enc->width * enc->height * 255.0 * 255.0;
1596 p = psnr(error / scale);
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script keys */
1598 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1599 ost->file_index, ost->index, type[j] | 32, p);
1601 p = psnr(error_sum / scale_sum);
1602 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1603 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1604 ost->file_index, ost->index, p);
1608 /* compute min output value */
1609 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1610 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1611 ost->st->time_base, AV_TIME_BASE_Q));
1613 nb_frames_drop += ost->last_dropped;
/* split pts into h:m:s.us for display */
1616 secs = FFABS(pts) / AV_TIME_BASE;
1617 us = FFABS(pts) % AV_TIME_BASE;
1623 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1624 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1626 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1628 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1629 "size=%8.0fkB time=", total_size / 1024.0);
1631 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1632 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1633 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1634 (100 * us) / AV_TIME_BASE);
1637 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1638 av_bprintf(&buf_script, "bitrate=N/A\n");
1640 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1641 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1644 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1645 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1646 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1647 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1648 hours, mins, secs, us);
1650 if (nb_frames_dup || nb_frames_drop)
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1652 nb_frames_dup, nb_frames_drop);
1653 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1654 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1658 av_bprintf(&buf_script, "speed=N/A\n");
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1661 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1664 if (print_stats || is_last_report) {
/* carriage return keeps updates on one terminal line; newline at end */
1665 const char end = is_last_report ? '\n' : '\r';
1666 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1667 fprintf(stderr, "%s %c", buf, end);
1669 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1674 if (progress_avio) {
1675 av_bprintf(&buf_script, "progress=%s\n",
1676 is_last_report ? "end" : "continue");
1677 avio_write(progress_avio, buf_script.str,
1678 FFMIN(buf_script.len, buf_script.size - 1));
1679 avio_flush(progress_avio);
1680 av_bprint_finalize(&buf_script, NULL);
1681 if (is_last_report) {
1682 if ((ret = avio_closep(&progress_avio)) < 0)
1683 av_log(NULL, AV_LOG_ERROR,
1684 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1689 print_final_stats(total_size);
/* Drain every active encoder at end of stream: repeatedly call the encoder
 * with a NULL frame until it stops producing packets, writing each flushed
 * packet to its muxer (and updating vstats for video if requested). */
1692 static void flush_encoders(void)
1696 for (i = 0; i < nb_output_streams; i++) {
1697 OutputStream *ost = output_streams[i];
1698 AVCodecContext *enc = ost->enc_ctx;
1699 AVFormatContext *os = output_files[ost->file_index]->ctx;
1700 int stop_encoding = 0;
/* stream copies have no encoder to flush */
1702 if (!ost->encoding_needed)
/* raw/PCM-style audio encoders (frame_size <= 1) buffer nothing */
1705 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1707 #if FF_API_LAVF_FMT_RAWPICTURE
1708 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* pick the media-specific flush entry point */
1713 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1716 switch (enc->codec_type) {
1717 case AVMEDIA_TYPE_AUDIO:
1718 encode = avcodec_encode_audio2;
1721 case AVMEDIA_TYPE_VIDEO:
1722 encode = avcodec_encode_video2;
1733 av_init_packet(&pkt);
1737 update_benchmark(NULL);
/* NULL frame tells the encoder to emit buffered packets */
1738 ret = encode(enc, &pkt, NULL, &got_packet);
1739 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1741 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1746 if (ost->logfile && enc->stats_out) {
1747 fprintf(ost->logfile, "%s", enc->stats_out);
/* if the muxer is already finished, drop the packet instead of writing */
1753 if (ost->finished & MUXER_FINISHED) {
1754 av_packet_unref(&pkt);
1757 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1758 pkt_size = pkt.size;
1759 write_frame(os, &pkt, ost);
1760 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1761 do_video_stats(ost, pkt_size);
1772 * Check whether a packet from ist should be written into ost at this time
1774 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1776 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, for source matching */
1777 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* reject packets from streams that do not feed this output stream */
1779 if (ost->source_index != ist_index)
/* reject packets arriving before the output file's requested start time */
1785 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding:
 * rescale its timestamps into the output timebase, honor -ss/-t style
 * start/recording-time limits, optionally run the bitstream through
 * av_parser_change(), and hand the result to write_frame(). */
1791 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1793 OutputFile *of = output_files[ost->file_index];
1794 InputFile *f = input_files [ist->file_index];
1795 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1796 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1800 av_init_packet(&opkt);
/* by default, wait for a keyframe before starting the copied stream */
1802 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1803 !ost->copy_initial_nonkeyframes)
/* drop packets that predate the effective start point */
1806 if (!ost->frame_number && !ost->copy_prior_start) {
1807 int64_t comp_start = start_time;
1808 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1809 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1810 if (pkt->pts == AV_NOPTS_VALUE ?
1811 ist->pts < comp_start :
1812 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file recording time reached: close the stream */
1816 if (of->recording_time != INT64_MAX &&
1817 ist->pts >= of->recording_time + start_time) {
1818 close_output_stream(ost);
/* input-file recording time reached: same, but measured from the
 * input file's own start time */
1822 if (f->recording_time != INT64_MAX) {
1823 start_time = f->ctx->start_time;
1824 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1825 start_time += f->start_time;
1826 if (ist->pts >= f->recording_time + start_time) {
1827 close_output_stream(ost);
1832 /* force the input stream PTS */
1833 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1836 if (pkt->pts != AV_NOPTS_VALUE)
1837 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1839 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the demuxer-tracked dts when the packet has none */
1841 if (pkt->dts == AV_NOPTS_VALUE)
1842 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1844 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1845 opkt.dts -= ost_tb_start_time;
/* audio: derive sample-accurate timestamps from the frame duration to
 * avoid rounding drift across timebases */
1847 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1848 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1850 duration = ist->dec_ctx->frame_size;
1851 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1852 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1853 ost->st->time_base) - ost_tb_start_time;
1856 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1857 opkt.flags = pkt->flags;
1858 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1859 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1860 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1861 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1862 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1864 int ret = av_parser_change(ost->parser, ost->st->codec,
1865 &opkt.data, &opkt.size,
1866 pkt->data, pkt->size,
1867 pkt->flags & AV_PKT_FLAG_KEY);
1869 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser may have allocated new data; wrap it so it gets freed */
1874 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1879 opkt.data = pkt->data;
1880 opkt.size = pkt->size;
1882 av_copy_packet_side_data(&opkt, pkt);
1884 #if FF_API_LAVF_FMT_RAWPICTURE
1885 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1886 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1887 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1888 /* store AVPicture in AVPacket, as expected by the output format */
1889 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1891 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1895 opkt.data = (uint8_t *)&pict;
1896 opkt.size = sizeof(AVPicture);
1897 opkt.flags |= AV_PKT_FLAG_KEY;
1901 write_frame(of->ctx, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and log the guess.
 * Return value semantics: nonzero on success -- failure paths are elided
 * in this excerpt. */
1904 int guess_input_channel_layout(InputStream *ist)
1906 AVCodecContext *dec = ist->dec_ctx;
1908 if (!dec->channel_layout) {
1909 char layout_name[256];
/* refuse to guess for very high channel counts (user-configurable cap) */
1911 if (dec->channels > ist->guess_layout_max)
1913 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1914 if (!dec->channel_layout)
1916 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1917 dec->channels, dec->channel_layout);
1918 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1919 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on
 * decode errors or corrupt decoded frames. ist may be NULL (subtitles). */
1924 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
1926 if (*got_output || ret<0)
1927 decode_error_stat[ret<0] ++;
1929 if (ret < 0 && exit_on_error)
1932 if (exit_on_error && *got_output && ist) {
1933 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1934 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet: run the decoder, advance the stream clocks by
 * the decoded duration, reconfigure filtergraphs if the sample format /
 * rate / channel layout changed mid-stream, pick the best timestamp
 * source for the frame, and feed it into every attached filter input. */
1940 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1942 AVFrame *decoded_frame, *f;
1943 AVCodecContext *avctx = ist->dec_ctx;
1944 int i, ret, err = 0, resample_changed;
1945 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
1947 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1948 return AVERROR(ENOMEM);
1949 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1950 return AVERROR(ENOMEM);
1951 decoded_frame = ist->decoded_frame;
1953 update_benchmark(NULL);
1954 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1955 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a decoder reporting success with an invalid sample rate is rejected */
1957 if (ret >= 0 && avctx->sample_rate <= 0) {
1958 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1959 ret = AVERROR_INVALIDDATA;
1962 check_decode_result(ist, got_output, ret);
1964 if (!*got_output || ret < 0)
1967 ist->samples_decoded += decoded_frame->nb_samples;
1968 ist->frames_decoded++;
1971 /* increment next_dts to use for the case where the input stream does not
1972 have timestamps or there are multiple frames in the packet */
1973 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1975 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream changes that require filtergraph reconfiguration */
1979 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1980 ist->resample_channels != avctx->channels ||
1981 ist->resample_channel_layout != decoded_frame->channel_layout ||
1982 ist->resample_sample_rate != decoded_frame->sample_rate;
1983 if (resample_changed) {
1984 char layout1[64], layout2[64];
1986 if (!guess_input_channel_layout(ist)) {
1987 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1988 "layout for Input Stream #%d.%d\n", ist->file_index,
1992 decoded_frame->channel_layout = avctx->channel_layout;
1994 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1995 ist->resample_channel_layout);
1996 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1997 decoded_frame->channel_layout);
1999 av_log(NULL, AV_LOG_INFO,
2000 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2001 ist->file_index, ist->st->index,
2002 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2003 ist->resample_channels, layout1,
2004 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2005 avctx->channels, layout2);
/* remember the new parameters, then rebuild affected filtergraphs */
2007 ist->resample_sample_fmt = decoded_frame->format;
2008 ist->resample_sample_rate = decoded_frame->sample_rate;
2009 ist->resample_channel_layout = decoded_frame->channel_layout;
2010 ist->resample_channels = avctx->channels;
2012 for (i = 0; i < nb_filtergraphs; i++)
2013 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2014 FilterGraph *fg = filtergraphs[i];
2015 if (configure_filtergraph(fg) < 0) {
2016 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2022 /* if the decoder provides a pts, use it instead of the last packet pts.
2023 the decoder could be delaying output by a packet or more. */
2024 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2025 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2026 decoded_frame_tb = avctx->time_base;
2027 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2028 decoded_frame->pts = decoded_frame->pkt_pts;
2029 decoded_frame_tb = ist->st->time_base;
2030 } else if (pkt->pts != AV_NOPTS_VALUE) {
2031 decoded_frame->pts = pkt->pts;
2032 decoded_frame_tb = ist->st->time_base;
2034 decoded_frame->pts = ist->dts;
2035 decoded_frame_tb = AV_TIME_BASE_Q;
/* consume the packet pts so a second frame from this packet won't reuse it */
2037 pkt->pts = AV_NOPTS_VALUE;
/* rescale into a 1/sample_rate timebase with sample-accurate delta tracking */
2038 if (decoded_frame->pts != AV_NOPTS_VALUE)
2039 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2040 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2041 (AVRational){1, avctx->sample_rate});
2042 ist->nb_samples = decoded_frame->nb_samples;
/* send a ref to every filter input but the last, which takes the frame
 * itself (avoids one extra ref) */
2043 for (i = 0; i < ist->nb_filters; i++) {
2044 if (i < ist->nb_filters - 1) {
2045 f = ist->filter_frame;
2046 err = av_frame_ref(f, decoded_frame);
2051 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2052 AV_BUFFERSRC_FLAG_PUSH);
2053 if (err == AVERROR_EOF)
2054 err = 0; /* ignore */
2058 decoded_frame->pts = AV_NOPTS_VALUE;
2060 av_frame_unref(ist->filter_frame);
2061 av_frame_unref(decoded_frame);
2062 return err < 0 ? err : ret;
/* Decode one video packet: run the decoder, fix up video_delay for
 * parser-less H.264, retrieve hwaccel frames into system memory, set the
 * stream clock from the best-effort timestamp, reconfigure filtergraphs
 * on size/format changes, and push the frame into all filter inputs. */
2065 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2067 AVFrame *decoded_frame, *f;
2068 int i, ret = 0, err = 0, resample_changed;
2069 int64_t best_effort_timestamp;
2070 AVRational *frame_sample_aspect;
2072 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2073 return AVERROR(ENOMEM);
2074 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2075 return AVERROR(ENOMEM);
/* stamp the packet with our tracked dts before handing it to the decoder */
2076 decoded_frame = ist->decoded_frame;
2077 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2079 update_benchmark(NULL);
2080 ret = avcodec_decode_video2(ist->dec_ctx,
2081 decoded_frame, got_output, pkt);
2082 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2084 // The following line may be required in some cases where there is no parser
2085 // or the parser does not has_b_frames correctly
2086 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2087 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2088 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2090 av_log(ist->dec_ctx, AV_LOG_WARNING,
2091 "video_delay is larger in decoder than demuxer %d > %d.\n"
2092 "If you want to help, upload a sample "
2093 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2094 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2095 ist->dec_ctx->has_b_frames,
2096 ist->st->codecpar->video_delay);
2099 check_decode_result(ist, got_output, ret);
/* debug aid: the decoder context and frame disagreeing on geometry/format */
2101 if (*got_output && ret >= 0) {
2102 if (ist->dec_ctx->width != decoded_frame->width ||
2103 ist->dec_ctx->height != decoded_frame->height ||
2104 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2105 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2106 decoded_frame->width,
2107 decoded_frame->height,
2108 decoded_frame->format,
2109 ist->dec_ctx->width,
2110 ist->dec_ctx->height,
2111 ist->dec_ctx->pix_fmt);
2115 if (!*got_output || ret < 0)
/* honor a user-forced field order (-top option) */
2118 if(ist->top_field_first>=0)
2119 decoded_frame->top_field_first = ist->top_field_first;
2121 ist->frames_decoded++;
/* download hwaccel surfaces into ordinary frames before filtering */
2123 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2124 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2128 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
/* the best-effort timestamp blends pts/dts heuristics; use it as the
 * frame pts and advance the stream clock */
2130 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2131 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2132 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2134 if (ts != AV_NOPTS_VALUE)
2135 ist->next_pts = ist->pts = ts;
2139 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2140 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2141 ist->st->index, av_ts2str(decoded_frame->pts),
2142 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2143 best_effort_timestamp,
2144 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2145 decoded_frame->key_frame, decoded_frame->pict_type,
2146 ist->st->time_base.num, ist->st->time_base.den);
/* container-level SAR overrides whatever the decoder produced */
2151 if (ist->st->sample_aspect_ratio.num)
2152 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* mid-stream size or pixel-format change requires rebuilding the graphs */
2154 resample_changed = ist->resample_width != decoded_frame->width ||
2155 ist->resample_height != decoded_frame->height ||
2156 ist->resample_pix_fmt != decoded_frame->format;
2157 if (resample_changed) {
2158 av_log(NULL, AV_LOG_INFO,
2159 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2160 ist->file_index, ist->st->index,
2161 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2162 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2164 ist->resample_width = decoded_frame->width;
2165 ist->resample_height = decoded_frame->height;
2166 ist->resample_pix_fmt = decoded_frame->format;
2168 for (i = 0; i < nb_filtergraphs; i++) {
2169 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2170 configure_filtergraph(filtergraphs[i]) < 0) {
2171 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2177 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* as in decode_audio: ref for all but the last filter, move into the last */
2178 for (i = 0; i < ist->nb_filters; i++) {
2179 if (!frame_sample_aspect->num)
2180 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2182 if (i < ist->nb_filters - 1) {
2183 f = ist->filter_frame;
2184 err = av_frame_ref(f, decoded_frame);
2189 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2190 if (ret == AVERROR_EOF) {
2191 ret = 0; /* ignore */
2192 } else if (ret < 0) {
2193 av_log(NULL, AV_LOG_FATAL,
2194 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2200 av_frame_unref(ist->filter_frame);
2201 av_frame_unref(decoded_frame);
2202 return err < 0 ? err : ret;
/* Decode one subtitle packet and dispatch it: optionally clamp the
 * previous subtitle's display duration (-fix_sub_duration, which delays
 * output by one subtitle), update the sub2video overlay, and encode the
 * subtitle into every matching output stream. */
2205 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2207 AVSubtitle subtitle;
2208 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2209 &subtitle, got_output, pkt);
2211 check_decode_result(NULL, got_output, ret);
2213 if (ret < 0 || !*got_output) {
2215 sub2video_flush(ist);
2219 if (ist->fix_sub_duration) {
2221 if (ist->prev_sub.got_output) {
/* end of the previous subtitle = start delta to this one, in ms */
2222 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2223 1000, AV_TIME_BASE);
2224 if (end < ist->prev_sub.subtitle.end_display_time) {
2225 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2226 "Subtitle duration reduced from %d to %d%s\n",
2227 ist->prev_sub.subtitle.end_display_time, end,
2228 end <= 0 ? ", dropping it" : "");
2229 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous so the (now fixed) previous one is emitted */
2232 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2233 FFSWAP(int, ret, ist->prev_sub.ret);
2234 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2242 sub2video_update(ist, &subtitle);
2244 if (!subtitle.num_rects)
2247 ist->frames_decoded++;
/* re-encode into every subtitle output stream fed by this input */
2249 for (i = 0; i < nb_output_streams; i++) {
2250 OutputStream *ost = output_streams[i];
2252 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2253 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2256 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2260 avsubtitle_free(&subtitle);
/* Signal end-of-stream to every filtergraph input fed by this input
 * stream by pushing a NULL frame into each buffer source. */
2264 static int send_filter_eof(InputStream *ist)
2267 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame marks EOF on a buffersrc */
2268 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2275 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one demuxed packet for an input stream: maintain the stream's
 * dts/pts bookkeeping, run the appropriate decoder (audio/video/subtitle)
 * when decoding is needed, send filter EOF on flush, and fall through to
 * the stream-copy path when no decoding is required.
 * NOTE(review): many interior lines are elided in this view; several
 * branches/closing braces are not visible. */
2276 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* First packet seen on this stream: derive an initial dts. */
2282 if (!ist->saw_first_ts) {
/* Start dts before zero by the decoder's reorder delay (has_b_frames),
 * scaled by the stream's average frame rate when one is known. */
2283 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2285 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2286 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2287 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2289 ist->saw_first_ts = 1;
/* Seed the predicted next timestamps on first use. */
2292 if (ist->next_dts == AV_NOPTS_VALUE)
2293 ist->next_dts = ist->dts;
2294 if (ist->next_pts == AV_NOPTS_VALUE)
2295 ist->next_pts = ist->pts;
2299 av_init_packet(&avpkt);
/* Packet carries a dts: resync our AV_TIME_BASE_Q-based clock to it. */
2307 if (pkt->dts != AV_NOPTS_VALUE) {
2308 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2309 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2310 ist->next_pts = ist->pts = ist->dts;
2313 // while we have more to decode or while the decoder did output something on EOF
2314 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2318 ist->pts = ist->next_pts;
2319 ist->dts = ist->next_dts;
/* Decoder consumed only part of the packet and the codec does not declare
 * AV_CODEC_CAP_SUBFRAMES: warn once per stream about multiple frames. */
2321 if (avpkt.size && avpkt.size != pkt->size &&
2322 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2323 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2324 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2325 ist->showed_multi_packet_warning = 1;
2328 switch (ist->dec_ctx->codec_type) {
2329 case AVMEDIA_TYPE_AUDIO:
2330 ret = decode_audio (ist, &avpkt, &got_output);
2332 case AVMEDIA_TYPE_VIDEO:
2333 ret = decode_video (ist, &avpkt, &got_output);
/* Frame duration: prefer the packet's own duration; otherwise derive it
 * from the codec framerate and the parser's repeat_pict field. */
2334 if (avpkt.duration) {
2335 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2336 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2337 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2338 duration = ((int64_t)AV_TIME_BASE *
2339 ist->dec_ctx->framerate.den * ticks) /
2340 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2344 if(ist->dts != AV_NOPTS_VALUE && duration) {
2345 ist->next_dts += duration;
/* No usable duration: next dts is unknown. */
2347 ist->next_dts = AV_NOPTS_VALUE;
2350 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2352 case AVMEDIA_TYPE_SUBTITLE:
2353 ret = transcode_subtitles(ist, &avpkt, &got_output);
2360 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2361 ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first decode call in the loop may use the packet pts. */
2368 avpkt.pts= AV_NOPTS_VALUE;
2370 // touch data and size only if not EOF
2372 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2380 if (got_output && !pkt)
2384 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2385 /* except when looping we need to flush but not to send an EOF */
2386 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2387 int ret = send_filter_eof(ist);
2389 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2394 /* handle stream copy */
2395 if (!ist->decoding_needed) {
2396 ist->dts = ist->next_dts;
/* Advance next_dts by one frame/packet worth of time, per media type. */
2397 switch (ist->dec_ctx->codec_type) {
2398 case AVMEDIA_TYPE_AUDIO:
2399 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2400 ist->dec_ctx->sample_rate;
2402 case AVMEDIA_TYPE_VIDEO:
2403 if (ist->framerate.num) {
2404 // TODO: Remove work-around for c99-to-c89 issue 7
2405 AVRational time_base_q = AV_TIME_BASE_Q;
2406 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2407 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2408 } else if (pkt->duration) {
2409 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2410 } else if(ist->dec_ctx->framerate.num != 0) {
2411 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2412 ist->next_dts += ((int64_t)AV_TIME_BASE *
2413 ist->dec_ctx->framerate.den * ticks) /
2414 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2418 ist->pts = ist->dts;
2419 ist->next_pts = ist->next_dts;
/* Forward the packet to every output stream that stream-copies from us. */
2421 for (i = 0; pkt && i < nb_output_streams; i++) {
2422 OutputStream *ost = output_streams[i];
2424 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2427 do_streamcopy(ist, ost, pkt);
/* Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to the file named by sdp_filename.
 * NOTE(review): allocation checks and cleanup lines are elided in this view. */
2433 static void print_sdp(void)
2438 AVIOContext *sdp_pb;
2439 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the outputs muxed by the "rtp" muxer; j counts them. */
2443 for (i = 0, j = 0; i < nb_output_files; i++) {
2444 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2445 avc[j] = output_files[i]->ctx;
2453 av_sdp_create(avc, j, sdp, sizeof(sdp));
2455 if (!sdp_filename) {
2456 printf("SDP:\n%s\n", sdp);
/* Otherwise write the SDP to the requested file via avio. */
2459 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2460 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2462 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2463 avio_closep(&sdp_pb);
2464 av_freep(&sdp_filename);
/* Look up the hwaccel entry matching a pixel format in the global,
 * NULL-name-terminated hwaccels[] table; the fall-through "not found"
 * return is elided from this view. */
2472 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2475 for (i = 0; hwaccels[i].name; i++)
2476 if (hwaccels[i].pix_fmt == pix_fmt)
2477 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format we can initialize, honoring
 * the user's -hwaccel selection; software formats fall through.
 * NOTE(review): the final fallback return is elided from this view. */
2481 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2483 InputStream *ist = s->opaque;
2484 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2487 for (p = pix_fmts; *p != -1; p++) {
2488 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2489 const HWAccel *hwaccel;
/* Skip plain software formats. */
2491 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2494 hwaccel = get_hwaccel(*p);
/* Reject the hwaccel if it conflicts with one already active on this
 * stream or with the user's explicit -hwaccel choice. */
2496 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2497 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2500 ret = hwaccel->init(s);
/* Init failed: fatal only if the user explicitly requested this hwaccel. */
2502 if (ist->hwaccel_id == hwaccel->id) {
2503 av_log(NULL, AV_LOG_FATAL,
2504 "%s hwaccel requested for input stream #%d:%d, "
2505 "but cannot be initialized.\n", hwaccel->name,
2506 ist->file_index, ist->st->index);
2507 return AV_PIX_FMT_NONE;
2511 ist->active_hwaccel_id = hwaccel->id;
2512 ist->hwaccel_pix_fmt = *p;
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hwaccel frames, otherwise fall back to the default. */
2519 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2521 InputStream *ist = s->opaque;
2523 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2524 return ist->hwaccel_get_buffer(s, frame, flags);
2526 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed), wiring up
 * the hwaccel callbacks and stream-specific decoder options. On failure a
 * human-readable message is written into 'error'.
 * NOTE(review): some lines (decoder-missing check condition, success return)
 * are elided from this view. */
2529 static int init_input_stream(int ist_index, char *error, int error_len)
2532 InputStream *ist = input_streams[ist_index];
2534 if (ist->decoding_needed) {
2535 AVCodec *codec = ist->dec;
2537 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2538 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2539 return AVERROR(EINVAL);
/* Hook hwaccel-aware format/buffer callbacks into the decoder context. */
2542 ist->dec_ctx->opaque = ist;
2543 ist->dec_ctx->get_format = get_format;
2544 ist->dec_ctx->get_buffer2 = get_buffer;
2545 ist->dec_ctx->thread_safe_callbacks = 1;
2547 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output: enable end-display-time computation. */
2548 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2549 (ist->decoding_needed & DECODING_FOR_OST)) {
2550 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2551 if (ist->decoding_needed & DECODING_FOR_FILTER)
2552 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2555 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2557 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2558 * audio, and video decoders such as cuvid or mediacodec */
2559 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2561 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2562 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2563 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2564 if (ret == AVERROR_EXPERIMENTAL)
2565 abort_codec_experimental(codec, 0);
2567 snprintf(error, error_len,
2568 "Error while opening decoder for input stream "
2570 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options left in decoder_opts were not consumed — report them. */
2573 assert_avoptions(ist->decoder_opts);
2576 ist->next_pts = AV_NOPTS_VALUE;
2577 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream an output stream maps from, or (in the elided
 * fall-through, not visible here) no stream when source_index < 0, e.g. for
 * outputs fed from complex filtergraphs. */
2582 static InputStream *get_input_stream(OutputStream *ost)
2584 if (ost->source_index >= 0)
2585 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values; FFDIFFSIGN avoids the overflow a
 * plain subtraction of two int64_t could produce. */
2589 static int compare_int64(const void *a, const void *b)
2591 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Initialize one output stream: for encoding streams this opens the encoder
 * (propagating subtitle headers, hw frames context, default options),
 * copies the resulting parameters/side data into the AVStream, and sets the
 * stream time base; for stream-copy it only syncs the legacy codec context.
 * Failure messages are written into 'error'.
 * NOTE(review): several lines (error returns, closing braces) are elided
 * from this view. */
2594 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2598 if (ost->encoding_needed) {
2599 AVCodec *codec = ost->enc;
2600 AVCodecContext *dec = NULL;
2603 if ((ist = get_input_stream(ost)))
/* Carry the decoder's ASS subtitle header over to the encoder. */
2605 if (dec && dec->subtitle_header) {
2606 /* ASS code assumes this buffer is null terminated so add extra byte. */
2607 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2608 if (!ost->enc_ctx->subtitle_header)
2609 return AVERROR(ENOMEM);
2610 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2611 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2613 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2614 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the user gave neither -b nor -ab. */
2615 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2617 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2618 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2619 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hardware frames flowing out of the filtergraph: share their context
 * with the encoder. */
2621 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2622 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2623 if (!ost->enc_ctx->hw_frames_ctx)
2624 return AVERROR(ENOMEM);
2627 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2628 if (ret == AVERROR_EXPERIMENTAL)
2629 abort_codec_experimental(codec, 1);
2630 snprintf(error, error_len,
2631 "Error while opening encoder for output stream #%d:%d - "
2632 "maybe incorrect parameters such as bit_rate, rate, width or height",
2633 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames of
 * exactly frame_size samples. */
2636 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2637 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2638 av_buffersink_set_frame_size(ost->filter->filter,
2639 ost->enc_ctx->frame_size);
2640 assert_avoptions(ost->encoder_opts);
2641 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2642 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2643 " It takes bits/s as argument, not kbits/s\n");
2645 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
2647 av_log(NULL, AV_LOG_FATAL,
2648 "Error initializing the output stream codec context.\n");
2652 * FIXME: this is only so that the bitstream filters and parsers (that still
2653 * work with a codec context) get the parameter values.
2654 * This should go away with the new BSF/parser API.
2656 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Copy coded side data produced by the encoder onto the stream. */
2660 if (ost->enc_ctx->nb_coded_side_data) {
2663 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2664 sizeof(*ost->st->side_data));
2665 if (!ost->st->side_data)
2666 return AVERROR(ENOMEM);
2668 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2669 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2670 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2672 sd_dst->data = av_malloc(sd_src->size);
2674 return AVERROR(ENOMEM);
2675 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2676 sd_dst->size = sd_src->size;
2677 sd_dst->type = sd_src->type;
2678 ost->st->nb_side_data++;
2682 // copy timebase while removing common factors
2683 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2684 ost->st->codec->codec= ost->enc_ctx->codec;
2685 } else if (ost->stream_copy) {
2686 // copy timebase while removing common factors
2687 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2690 * FIXME: this is only so that the bitstream filters and parsers (that still
2691 * work with a codec context) get the parameter values.
2692 * This should go away with the new BSF/parser API.
2694 ret = avcodec_parameters_to_context(ost->st->codec, ost->st->codecpar);
/* Parse the -force_key_frames argument 'kf' (a comma-separated list of
 * timestamps, where an entry "chapters[+offset]" expands to every chapter
 * start of the output file) into a sorted array of forced-keyframe pts in
 * encoder time base, stored on the output stream.
 * NOTE(review): interior lines (comma counting, loop advance) are elided
 * from this view. */
2702 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2703 AVCodecContext *avctx)
2706 int n = 1, i, size, index = 0;
/* Count entries: one more than the number of commas. */
2709 for (p = kf; *p; p++)
2713 pts = av_malloc_array(size, sizeof(*pts));
2715 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2720 for (i = 0; i < n; i++) {
2721 char *next = strchr(p, ',');
/* "chapters" keyword: force a keyframe at each chapter start. */
2726 if (!memcmp(p, "chapters", 8)) {
2728 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by nb_chapters-1 (the keyword itself used one slot),
 * guarding against integer overflow of 'size'. */
2731 if (avf->nb_chapters > INT_MAX - size ||
2732 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2734 av_log(NULL, AV_LOG_FATAL,
2735 "Could not allocate forced key frames array.\n");
/* Optional offset after "chapters", e.g. "chapters-0.1". */
2738 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2739 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2741 for (j = 0; j < avf->nb_chapters; j++) {
2742 AVChapter *c = avf->chapters[j];
2743 av_assert1(index < size);
2744 pts[index++] = av_rescale_q(c->start, c->time_base,
2745 avctx->time_base) + t;
/* Plain timestamp entry. */
2750 t = parse_time_or_die("force_key_frames", p, 1);
2751 av_assert1(index < size);
2752 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2759 av_assert0(index == size);
2760 qsort(pts, size, sizeof(*pts), compare_int64);
2761 ost->forced_kf_count = size;
2762 ost->forced_kf_pts = pts;
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after the input was opened; nb_streams_warn tracks the highest stream
 * index already reported. */
2765 static void report_new_stream(int input_index, AVPacket *pkt)
2767 InputFile *file = input_files[input_index];
2768 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or it existed at open time). */
2770 if (pkt->stream_index < file->nb_streams_warn)
2772 av_log(file->ctx, AV_LOG_WARNING,
2773 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2774 av_get_media_type_string(st->codecpar->codec_type),
2775 input_index, pkt->stream_index,
2776 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2777 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name, unless the user already
 * supplied one. In bitexact mode only a generic "Lavc" prefix is used so
 * output remains reproducible. */
2780 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2782 AVDictionaryEntry *e;
2784 uint8_t *encoder_string;
2785 int encoder_string_len;
2786 int format_flags = 0;
2787 int codec_flags = 0;
/* User-provided tag wins; don't overwrite it. */
2789 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
2792 e = av_dict_get(of->opts, "fflags", NULL, 0);
2794 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2797 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2799 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2801 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2804 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one space separator and the trailing NUL. */
2807 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2808 encoder_string = av_mallocz(encoder_string_len);
2809 if (!encoder_string)
2812 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2813 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2815 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2816 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: dict takes ownership of encoder_string. */
2817 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2818 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the transcode main loop: bind complex-filtergraph
 * outputs to source streams, set up rate emulation, compute encoding or
 * stream-copy parameters for every output stream, open all decoders and
 * encoders, write output file headers, and dump the stream mapping.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): this function is shown with many interior lines elided
 * (returns, braces, some conditions); comments below describe only what the
 * visible lines establish. */
2821 static int transcode_init(void)
2823 int ret = 0, i, j, k;
2824 AVFormatContext *oc;
2827 char error[1024] = {0};
/* Give outputs fed by single-input complex filtergraphs a source stream
 * index so later code can treat them uniformly. */
2830 for (i = 0; i < nb_filtergraphs; i++) {
2831 FilterGraph *fg = filtergraphs[i];
2832 for (j = 0; j < fg->nb_outputs; j++) {
2833 OutputFilter *ofilter = fg->outputs[j];
2834 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2836 if (fg->nb_inputs != 1)
2838 for (k = nb_input_streams-1; k >= 0 ; k--)
2839 if (fg->inputs[0]->ist == input_streams[k])
2841 ofilter->ost->source_index = k;
2845 /* init framerate emulation */
2846 for (i = 0; i < nb_input_files; i++) {
2847 InputFile *ifile = input_files[i];
2848 if (ifile->rate_emu)
2849 for (j = 0; j < ifile->nb_streams; j++)
2850 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2853 /* for each output stream, we compute the right encoding parameters */
2854 for (i = 0; i < nb_output_streams; i++) {
2855 ost = output_streams[i];
2856 oc = output_files[ost->file_index]->ctx;
2857 ist = get_input_stream(ost);
2859 if (ost->attachment_filename)
2863 ost->st->disposition = ist->st->disposition;
/* If this is the only stream of its media type in the file, mark audio and
 * video streams as the default disposition. */
2865 for (j=0; j<oc->nb_streams; j++) {
2866 AVStream *st = oc->streams[j];
2867 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
2870 if (j == oc->nb_streams)
2871 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
2872 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
2873 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: transfer codec parameters from input to output ---- */
2876 if (ost->stream_copy) {
2877 AVCodecParameters *par_dst = ost->st->codecpar;
2878 AVCodecParameters *par_src = ost->ref_par;
2880 uint64_t extra_size;
2882 av_assert0(ist && !ost->filter);
2884 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2885 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2887 av_log(NULL, AV_LOG_FATAL,
2888 "Error setting up codec context options.\n");
2891 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* uint64_t so the padding addition cannot overflow before the check. */
2893 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2895 if (extra_size > INT_MAX) {
2896 return AVERROR(EINVAL);
2899 /* if stream_copy is selected, no need to decode or encode */
2900 par_dst->codec_id = par_src->codec_id;
2901 par_dst->codec_type = par_src->codec_type;
/* Keep the source codec tag only when it is valid for the output format. */
2903 if (!par_dst->codec_tag) {
2904 unsigned int codec_tag;
2905 if (!oc->oformat->codec_tag ||
2906 av_codec_get_id (oc->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2907 !av_codec_get_tag2(oc->oformat->codec_tag, par_src->codec_id, &codec_tag))
2908 par_dst->codec_tag = par_src->codec_tag;
2911 par_dst->bit_rate = par_src->bit_rate;
2912 par_dst->field_order = par_src->field_order;
2913 par_dst->chroma_location = par_src->chroma_location;
2914 if (par_src->extradata_size) {
2915 par_dst->extradata = av_mallocz(extra_size);
2916 if (!par_dst->extradata) {
2917 return AVERROR(ENOMEM);
2919 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2921 par_dst->extradata_size= par_src->extradata_size;
2922 par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2923 par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2925 if (!ost->frame_rate.num)
2926 ost->frame_rate = ist->framerate;
2927 ost->st->avg_frame_rate = ost->frame_rate;
2929 ost->st->time_base = ist->st->time_base;
/* Let lavf adjust the timing info for the target container (copy_tb). */
2931 ret = avformat_transfer_internal_stream_timing_info(oc->oformat, ost->st, ist->st, copy_tb);
/* Copy stream-level side data, skipping the display matrix when rotation
 * was overridden on the command line. */
2935 if (ist->st->nb_side_data) {
2936 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2937 sizeof(*ist->st->side_data));
2938 if (!ost->st->side_data)
2939 return AVERROR(ENOMEM);
2941 ost->st->nb_side_data = 0;
2942 for (j = 0; j < ist->st->nb_side_data; j++) {
2943 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2944 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2946 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2949 sd_dst->data = av_malloc(sd_src->size);
2951 return AVERROR(ENOMEM);
2952 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2953 sd_dst->size = sd_src->size;
2954 sd_dst->type = sd_src->type;
2955 ost->st->nb_side_data++;
2959 ost->parser = av_parser_init(par_dst->codec_id);
/* Per-media-type parameter copies. */
2961 switch (par_dst->codec_type) {
2962 case AVMEDIA_TYPE_AUDIO:
2963 if (audio_volume != 256) {
2964 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2967 par_dst->channel_layout = par_src->channel_layout;
2968 par_dst->sample_rate = par_src->sample_rate;
2969 par_dst->channels = par_src->channels;
2970 par_dst->frame_size = par_src->frame_size;
2971 par_dst->block_align = par_src->block_align;
2972 par_dst->initial_padding = par_src->initial_padding;
2973 par_dst->trailing_padding = par_src->trailing_padding;
2974 par_dst->profile = par_src->profile;
/* Bogus block_align values for MP3/AC3 would confuse muxers — clear them. */
2975 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2976 par_dst->block_align= 0;
2977 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2978 par_dst->block_align= 0;
2980 case AVMEDIA_TYPE_VIDEO:
2981 par_dst->format = par_src->format;
2982 par_dst->color_space = par_src->color_space;
2983 par_dst->color_range = par_src->color_range;
2984 par_dst->color_primaries = par_src->color_primaries;
2985 par_dst->color_trc = par_src->color_trc;
2986 par_dst->width = par_src->width;
2987 par_dst->height = par_src->height;
2988 par_dst->video_delay = par_src->video_delay;
2989 par_dst->profile = par_src->profile;
2990 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2992 av_mul_q(ost->frame_aspect_ratio,
2993 (AVRational){ par_dst->height, par_dst->width });
2994 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2995 "with stream copy may produce invalid files\n");
2997 else if (ist->st->sample_aspect_ratio.num)
2998 sar = ist->st->sample_aspect_ratio;
3000 sar = par_src->sample_aspect_ratio;
3001 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3002 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3003 ost->st->r_frame_rate = ist->st->r_frame_rate;
3005 case AVMEDIA_TYPE_SUBTITLE:
3006 par_dst->width = par_src->width;
3007 par_dst->height = par_src->height;
3009 case AVMEDIA_TYPE_UNKNOWN:
3010 case AVMEDIA_TYPE_DATA:
3011 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path: derive encoder parameters from the filtergraph ---- */
3017 AVCodecContext *enc_ctx = ost->enc_ctx;
3018 AVCodecContext *dec_ctx = NULL;
3020 set_encoder_id(output_files[ost->file_index], ost);
3023 dec_ctx = ist->dec_ctx;
3025 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* Hardware transcode fast paths (QSV / CUVID), when configured. */
3029 if (qsv_transcode_init(ost))
3034 if (cuvid_transcode_init(ost))
/* Simple (1:1) filtergraphs are configured lazily here. */
3038 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3039 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3040 filtergraph_is_simple(ost->filter->graph)) {
3041 FilterGraph *fg = ost->filter->graph;
3042 if (configure_filtergraph(fg)) {
3043 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate: filter sink, then -r on input, then input r_frame_rate,
 * finally a 25 fps fallback with a warning. */
3048 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3049 if (!ost->frame_rate.num)
3050 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3051 if (ist && !ost->frame_rate.num)
3052 ost->frame_rate = ist->framerate;
3053 if (ist && !ost->frame_rate.num)
3054 ost->frame_rate = ist->st->r_frame_rate;
3055 if (ist && !ost->frame_rate.num) {
3056 ost->frame_rate = (AVRational){25, 1};
3057 av_log(NULL, AV_LOG_WARNING,
3059 "about the input framerate is available. Falling "
3060 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3061 "if you want a different framerate.\n",
3062 ost->file_index, ost->index);
3064 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3065 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3066 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3067 ost->frame_rate = ost->enc->supported_framerates[idx];
3069 // reduce frame rate for mpeg4 to be within the spec limits
3070 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3071 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3072 ost->frame_rate.num, ost->frame_rate.den, 65535);
3076 switch (enc_ctx->codec_type) {
3077 case AVMEDIA_TYPE_AUDIO:
/* Audio encoder parameters come from the filtergraph output link. */
3078 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3080 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3081 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3082 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3083 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3084 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3085 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3087 case AVMEDIA_TYPE_VIDEO:
3088 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3089 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3090 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3091 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3092 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3093 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3094 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale forced keyframe timestamps into the encoder time base. */
3096 for (j = 0; j < ost->forced_kf_count; j++)
3097 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3099 enc_ctx->time_base);
3101 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3102 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3103 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3104 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3105 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3106 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn when libx264/mpeg2video will pick a non-yuv420p format implicitly. */
3107 if (!strncmp(ost->enc->name, "libx264", 7) &&
3108 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3109 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3110 av_log(NULL, AV_LOG_WARNING,
3111 "No pixel format specified, %s for H.264 encoding chosen.\n"
3112 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3113 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3114 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3115 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3116 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3117 av_log(NULL, AV_LOG_WARNING,
3118 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3119 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3120 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3121 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3123 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3124 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3126 ost->st->avg_frame_rate = ost->frame_rate;
/* Reset the raw-sample depth when geometry/format changed vs the decoder. */
3129 enc_ctx->width   != dec_ctx->width  ||
3130 enc_ctx->height  != dec_ctx->height ||
3131 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3132 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* Forced keyframes: "expr:..." parses an expression, "source" keeps input
 * keyframes, anything else is a static timestamp list. */
3135 if (ost->forced_keyframes) {
3136 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3137 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3138 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3140 av_log(NULL, AV_LOG_ERROR,
3141 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3144 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3145 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3146 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3147 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3149 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3150 // parse it only for static kf timings
3151 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3152 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3156 case AVMEDIA_TYPE_SUBTITLE:
3157 enc_ctx->time_base = (AVRational){1, 1000};
3158 if (!enc_ctx->width) {
3159 enc_ctx->width  = input_streams[ost->source_index]->st->codecpar->width;
3160 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3163 case AVMEDIA_TYPE_DATA:
/* Parse the -disposition option value using a local flags table. */
3171 if (ost->disposition) {
3172 static const AVOption opts[] = {
3173 { "disposition"     , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3174 { "default"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3175 { "dub"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3176 { "original"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3177 { "comment"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3178 { "lyrics"          , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3179 { "karaoke"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3180 { "forced"          , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3181 { "hearing_impaired", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3182 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3183 { "clean_effects"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3184 { "captions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3185 { "descriptions"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3186 { "metadata"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3189 static const AVClass class = {
3191 .item_name  = av_default_item_name,
3193 .version    = LIBAVUTIL_VERSION_INT,
3195 const AVClass *pclass = &class;
3197 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3203 /* init input streams */
3204 for (i = 0; i < nb_input_streams; i++)
3205 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure close any encoders already prepared before bailing out. */
3206 for (i = 0; i < nb_output_streams; i++) {
3207 ost = output_streams[i];
3208 avcodec_close(ost->enc_ctx);
3213 /* open each encoder */
3214 for (i = 0; i < nb_output_streams; i++) {
3215 ret = init_output_stream(output_streams[i], error, sizeof(error));
3220 /* discard unused programs */
3221 for (i = 0; i < nb_input_files; i++) {
3222 InputFile *ifile = input_files[i];
3223 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3224 AVProgram *p = ifile->ctx->programs[j];
3225 int discard  = AVDISCARD_ALL;
/* Keep the program if any of its streams is in use. */
3227 for (k = 0; k < p->nb_stream_indexes; k++)
3228 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3229 discard = AVDISCARD_DEFAULT;
3232 p->discard = discard;
3236 /* open files and write file headers */
3237 for (i = 0; i < nb_output_files; i++) {
3238 oc = output_files[i]->ctx;
3239 oc->interrupt_callback = int_cb;
3240 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3241 snprintf(error, sizeof(error),
3242 "Could not write header for output file #%d "
3243 "(incorrect codec parameters ?): %s",
3244 i, av_err2str(ret));
3245 ret = AVERROR(EINVAL);
3248 //         assert_avoptions(output_files[i]->opts);
3249 if (strcmp(oc->oformat->name, "rtp")) {
3255 /* dump the file output parameters - cannot be done before in case
3257 for (i = 0; i < nb_output_files; i++) {
3258 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3261 /* dump the stream mapping */
3262 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3263 for (i = 0; i < nb_input_streams; i++) {
3264 ist = input_streams[i];
3266 for (j = 0; j < ist->nb_filters; j++) {
3267 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3268 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3269 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3270 ist->filters[j]->name);
3271 if (nb_filtergraphs > 1)
3272 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3273 av_log(NULL, AV_LOG_INFO, "\n");
3278 for (i = 0; i < nb_output_streams; i++) {
3279 ost = output_streams[i];
3281 if (ost->attachment_filename) {
3282 /* an attached file */
3283 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3284 ost->attachment_filename, ost->file_index, ost->index);
3288 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3289 /* output from a complex graph */
3290 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3291 if (nb_filtergraphs > 1)
3292 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3294 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3295 ost->index, ost->enc ? ost->enc->name : "?");
3299 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3300 input_streams[ost->source_index]->file_index,
3301 input_streams[ost->source_index]->st->index,
3304 if (ost->sync_ist != input_streams[ost->source_index])
3305 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3306 ost->sync_ist->file_index,
3307 ost->sync_ist->st->index);
3308 if (ost->stream_copy)
3309 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Show decoder/encoder names; "native" when codec and descriptor match. */
3311 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3312 const AVCodec *out_codec   = ost->enc;
3313 const char *decoder_name   = "?";
3314 const char *in_codec_name  = "?";
3315 const char *encoder_name   = "?";
3316 const char *out_codec_name = "?";
3317 const AVCodecDescriptor *desc;
3320 decoder_name  = in_codec->name;
3321 desc = avcodec_descriptor_get(in_codec->id);
3323 in_codec_name = desc->name;
3324 if (!strcmp(decoder_name, in_codec_name))
3325 decoder_name = "native";
3329 encoder_name   = out_codec->name;
3330 desc = avcodec_descriptor_get(out_codec->id);
3332 out_codec_name = desc->name;
3333 if (!strcmp(encoder_name, out_codec_name))
3334 encoder_name = "native";
3337 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3338 in_codec_name, decoder_name,
3339 out_codec_name, encoder_name);
3341 av_log(NULL, AV_LOG_INFO, "\n");
3345 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3349 if (sdp_filename || want_sdp) {
3353 transcode_init_done = 1;
3358 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* A stream stops wanting output when it is finished, its file hit the size
 * limit, or its frame count reached -frames; hitting the frame limit also
 * closes every stream of that output file. */
3359 static int need_output(void)
3363 for (i = 0; i < nb_output_streams; i++) {
3364 OutputStream *ost    = output_streams[i];
3365 OutputFile *of       = output_files[ost->file_index];
3366 AVFormatContext *os  = output_files[ost->file_index]->ctx;
3368 if (ost->finished ||
3369 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3371 if (ost->frame_number >= ost->max_frames) {
3373 for (j = 0; j < of->ctx->nb_streams; j++)
3374 close_output_stream(output_streams[of->ost_index + j]);
3385 * Select the output stream to process.
3387 * @return selected output stream, or NULL if none available
3389 static OutputStream *choose_output(void)
3392 int64_t opts_min = INT64_MAX;
3393 OutputStream *ost_min = NULL;
3395 for (i = 0; i < nb_output_streams; i++) {
3396 OutputStream *ost = output_streams[i];
/* Rescale the stream's current dts for comparison across streams; a
 * stream with no dts yet sorts first (INT64_MIN) so it gets fed before
 * the others. */
3397 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3398 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3400 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3401 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Keep the unfinished stream that is furthest behind.  If that stream is
 * currently "unavailable" (its input returned EAGAIN) report NULL so the
 * caller can wait instead of busy-looping. */
3403 if (!ost->finished && opts < opts_min) {
3405 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin, so interactive
 * command entry (the 'c'/'C' and 'D' keys) can show what is typed.
 * Silently does nothing when stdin is not a tty (tcgetattr fails). */
3411 static void set_tty_echo(int on)
3415 if (tcgetattr(0, &tty) == 0) {
3416 if (on) tty.c_lflag |= ECHO;
3417 else tty.c_lflag &= ~ECHO;
3418 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most once per 100ms, and not when daemonized) for
 * single-key commands that tweak the running transcode: verbosity, QP
 * histogram, packet dumping, codec debug flags, and filter commands.
 * Returns AVERROR_EXIT when the user (or a signal) requests termination,
 * 0 otherwise. */
3423 static int check_keyboard_interaction(int64_t cur_time)
3426 static int64_t last_time;
3427 if (received_nb_signals)
3428 return AVERROR_EXIT;
3429 /* read_key() returns 0 on EOF */
3430 if(cur_time - last_time >= 100000 && !run_as_daemon){
3432 last_time = cur_time;
/* 'q' (checked on an elided line) aborts the transcode. */
3436 return AVERROR_EXIT;
3437 if (key == '+') av_log_set_level(av_log_get_level()+10);
3438 if (key == '-') av_log_set_level(av_log_get_level()-10);
3439 if (key == 's') qp_hist ^= 1;
/* 'h' cycles the dump state: off -> packet dump -> packet+hex dump -> off. */
3442 do_hex_dump = do_pkt_dump = 0;
3443 } else if(do_pkt_dump){
3447 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read "<target> <time> <command> [<arg>]" from the terminal and
 * deliver it to the filtergraphs: 'c' sends to the first matching filter
 * (AVFILTER_CMD_FLAG_ONE), 'C' sends/queues to all matching filters. */
3449 if (key == 'c' || key == 'C'){
3450 char buf[4096], target[64], command[256], arg[256] = {0};
3453 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3456 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3461 fprintf(stderr, "\n");
3463 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3464 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3465 target, time, command, arg);
3466 for (i = 0; i < nb_filtergraphs; i++) {
3467 FilterGraph *fg = filtergraphs[i];
/* time == -1 (immediate) path: send the command right away; buf doubles
 * as the response buffer. */
3470 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3471 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3472 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3473 } else if (key == 'c') {
3474 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3475 ret = AVERROR_PATCHWELCOME;
3477 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3479 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3484 av_log(NULL, AV_LOG_ERROR,
3485 "Parse error, at least 3 arguments were expected, "
3486 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle to the next supported debug flag; 'D': prompt for a numeric
 * debug value.  The chosen value is applied to all decoder and encoder
 * contexts below. */
3489 if (key == 'd' || key == 'D'){
3492 debug = input_streams[0]->st->codec->debug<<1;
3493 if(!debug) debug = 1;
3494 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3501 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3506 fprintf(stderr, "\n");
3507 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3508 fprintf(stderr,"error parsing debug value\n");
3510 for(i=0;i<nb_input_streams;i++) {
3511 input_streams[i]->st->codec->debug = debug;
3513 for(i=0;i<nb_output_streams;i++) {
3514 OutputStream *ost = output_streams[i];
3515 ost->enc_ctx->debug = debug;
3517 if(debug) av_log_set_level(AV_LOG_DEBUG);
3518 fprintf(stderr,"debug=%d\n", debug);
/* '?' or any unrecognized key: print the interactive-key help. */
3521 fprintf(stderr, "key function\n"
3522 "? show this help\n"
3523 "+ increase verbosity\n"
3524 "- decrease verbosity\n"
3525 "c Send command to first matching filter supporting it\n"
3526 "C Send/Que command to all matching filters\n"
3527 "D cycle through available debug modes\n"
3528 "h dump packets/hex press to cycle through the 3 states\n"
3530 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is the InputFile*): reads packets
 * with av_read_frame() and forwards them to the main thread through the
 * file's thread message queue.  On EOF/error the status is propagated to
 * the receiving side via set_err_recv. */
3537 static void *input_thread(void *arg)
3540 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3545 ret = av_read_frame(f->ctx, &pkt);
3547 if (ret == AVERROR(EAGAIN)) {
/* Read failed or hit EOF: tell the receiver and stop this thread. */
3552 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3555 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send found the queue full: retry (presumably with the
 * non-blocking flag cleared on an elided line -- TODO confirm) and warn
 * that -thread_queue_size may be too small. */
3556 if (flags && ret == AVERROR(EAGAIN)) {
3558 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3559 av_log(f->ctx, AV_LOG_WARNING,
3560 "Thread message queue blocking; consider raising the "
3561 "thread_queue_size option (current value: %d)\n",
3562 f->thread_queue_size);
/* Any send failure other than normal EOF is reported; the packet is
 * released either way and the error forwarded to the receiver. */
3565 if (ret != AVERROR_EOF)
3566 av_log(f->ctx, AV_LOG_ERROR,
3567 "Unable to send packet to main thread: %s\n",
3569 av_packet_unref(&pkt);
3570 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all input demuxer threads: signal EOF to each sender, drain
 * and unref any packets still queued, join the thread, then free the
 * message queue. */
3578 static void free_input_threads(void)
3582 for (i = 0; i < nb_input_files; i++) {
3583 InputFile *f = input_files[i];
/* Skip files that never got a thread/queue (e.g. single-input runs). */
3586 if (!f || !f->in_thread_queue)
3588 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so the sender can finish and be joined. */
3589 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3590 av_packet_unref(&pkt);
3592 pthread_join(f->thread, NULL);
3594 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file.  With a
 * single input file no thread is needed (the main loop reads directly).
 * Returns 0 on success or a negative AVERROR code. */
3598 static int init_input_threads(void)
3602 if (nb_input_files == 1)
3605 for (i = 0; i < nb_input_files; i++) {
3606 InputFile *f = input_files[i];
/* Non-seekable real inputs (pipes, network, devices) must not block the
 * main loop, so their queue is used in non-blocking mode; "lavfi" is
 * exempted even though it has no pb. */
3608 if (f->ctx->pb ? !f->ctx->pb->seekable :
3609 strcmp(f->ctx->iformat->name, "lavfi"))
3610 f->non_blocking = 1;
3611 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3612 f->thread_queue_size, sizeof(AVPacket));
3616 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3617 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3618 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, so wrap it as an AVERROR. */
3619 return AVERROR(ret);
/* Receive one packet from the input file's demuxer thread; non-blocking
 * (may return AVERROR(EAGAIN)) when the file is flagged non_blocking. */
3625 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3627 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3629 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, either via the demuxer thread
 * (multiple inputs) or by reading the format context directly. */
3633 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Real-time rate emulation (-re; presumably guarded by f->rate_emu on an
 * elided line -- TODO confirm): if any stream's dts is ahead of wall
 * clock, report EAGAIN so the caller throttles instead of reading. */
3637 for (i = 0; i < f->nb_streams; i++) {
3638 InputStream *ist = input_streams[f->ist_index + i];
3639 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3640 int64_t now = av_gettime_relative() - ist->start;
3642 return AVERROR(EAGAIN);
3647 if (nb_input_files > 1)
3648 return get_input_packet_mt(f, pkt);
3650 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step hit EAGAIN somewhere. */
3653 static int got_eagain(void)
3656 for (i = 0; i < nb_output_streams; i++)
3657 if (output_streams[i]->unavailable)
/* Clear all per-input-file eagain flags and per-output-stream
 * unavailable flags before the next scheduling round. */
3662 static void reset_eagain(void)
3665 for (i = 0; i < nb_input_files; i++)
3666 input_files[i]->eagain = 0;
3667 for (i = 0; i < nb_output_streams; i++)
3668 output_streams[i]->unavailable = 0;
3671 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3672 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3673 AVRational time_base)
/* First value seen: adopt tmp and its time base outright. */
3679 return tmp_time_base;
/* Compare the two durations across time bases; keep whichever is larger
 * and return the time base that *duration is now expressed in. */
3682 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3685 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush
 * every active decoder, and extend ifile->duration by this pass's length
 * so timestamps of the next loop iteration continue monotonically. */
3691 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3694 AVCodecContext *avctx;
3695 int i, ret, has_audio = 0;
3696 int64_t duration = 0;
3698 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and note whether any audio stream decoded
 * samples this iteration. */
3702 for (i = 0; i < ifile->nb_streams; i++) {
3703 ist = input_streams[ifile->ist_index + i];
3704 avctx = ist->dec_ctx;
/* Drain the decoder (NULL packet, no_eof=1) then reset its state so the
 * looped data decodes cleanly. */
3707 if (ist->decoding_needed) {
3708 process_input_packet(ist, NULL, 1);
3709 avcodec_flush_buffers(avctx);
3712 /* duration is the length of the last frame in a stream
3713 * when audio stream is present we don't care about
3714 * last video frame length because it's not defined exactly */
3715 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's total duration (last-frame length
 * plus max_pts - min_pts) and fold it into ifile->duration. */
3719 for (i = 0; i < ifile->nb_streams; i++) {
3720 ist = input_streams[ifile->ist_index + i];
3721 avctx = ist->dec_ctx;
/* Audio: last frame length is nb_samples at the decoder sample rate. */
3724 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3725 AVRational sample_rate = {1, avctx->sample_rate};
3727 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: approximate last frame length from -r, else the stream's
 * average frame rate, else a single time-base tick. */
3731 if (ist->framerate.num) {
3732 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3733 } else if (ist->st->avg_frame_rate.num) {
3734 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3735 } else duration = 1;
3737 if (!ifile->duration)
3738 ifile->time_base = ist->st->time_base;
3739 /* the total duration of the stream, max_pts - min_pts is
3740 * the duration of the stream without the last frame */
3741 duration += ist->max_pts - ist->min_pts;
3742 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: one iteration consumed (decrement on elided line). */
3746 if (ifile->loop > 0)
/* Read and process one packet from input file file_index.
 * Return value:
3754 * - 0 -- one packet was read and processed
3755 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3756 * this function should be called again
3757 * - AVERROR_EOF -- this function should not be called again
3759 static int process_input(int file_index)
3761 InputFile *ifile = input_files[file_index];
3762 AVFormatContext *is;
3770 ret = get_input_packet(ifile, &pkt);
3772 if (ret == AVERROR(EAGAIN)) {
/* EOF while -stream_loop is active: rewind and try reading again. */
3776 if (ret < 0 && ifile->loop) {
3777 if ((ret = seek_to_start(ifile, is)) < 0)
3779 ret = get_input_packet(ifile, &pkt);
3780 if (ret == AVERROR(EAGAIN)) {
3786 if (ret != AVERROR_EOF) {
3787 print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file and finish all output
 * streams fed from it that do not go through a filtergraph. */
3792 for (i = 0; i < ifile->nb_streams; i++) {
3793 ist = input_streams[ifile->ist_index + i];
3794 if (ist->decoding_needed) {
3795 ret = process_input_packet(ist, NULL, 0);
3800 /* mark all outputs that don't go through lavfi as finished */
3801 for (j = 0; j < nb_output_streams; j++) {
3802 OutputStream *ost = output_streams[j];
3804 if (ost->source_index == ifile->ist_index + i &&
3805 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3806 finish_output_stream(ost);
/* eof_reached makes the scheduler stop selecting this file; EAGAIN lets
 * the main loop continue with the remaining inputs. */
3810 ifile->eof_reached = 1;
3811 return AVERROR(EAGAIN);
/* Optional -dump/-hex packet dump of the raw demuxed packet. */
3817 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3818 is->streams[pkt.stream_index]);
3820 /* the following test is needed in case new streams appear
3821 dynamically in stream : we ignore them */
3822 if (pkt.stream_index >= ifile->nb_streams) {
3823 report_new_stream(file_index, &pkt);
3824 goto discard_packet;
3827 ist = input_streams[ifile->ist_index + pkt.stream_index];
3829 ist->data_size += pkt.size;
/* Discarded streams (stream_copy off + no decoding) skip processing. */
3833 goto discard_packet;
3835 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3836 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose trace of the demuxed timestamps before any correction. */
3841 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3842 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3843 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3844 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3845 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3846 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3847 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3848 av_ts2str(input_files[ist->file_index]->ts_offset),
3849 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* --- Timestamp wrap correction for streams with < 64 pts_wrap_bits --- */
3852 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3853 int64_t stime, stime2;
3854 // Correcting starttime based on the enabled streams
3855 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3856 // so we instead do it here as part of discontinuity handling
3857 if ( ist->next_dts == AV_NOPTS_VALUE
3858 && ifile->ts_offset == -is->start_time
3859 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3860 int64_t new_start_time = INT64_MAX;
3861 for (i=0; i<is->nb_streams; i++) {
3862 AVStream *st = is->streams[i];
3863 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3865 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3867 if (new_start_time > is->start_time) {
3868 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3869 ifile->ts_offset = -new_start_time;
/* Timestamps more than half a wrap period past start_time are assumed
 * to be pre-wrap values and are shifted down by one full period. */
3873 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3874 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3875 ist->wrap_correction_done = 1;
3877 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3878 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3879 ist->wrap_correction_done = 0;
3881 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3882 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3883 ist->wrap_correction_done = 0;
3887 /* add the stream-global side data to the first packet */
3888 if (ist->nb_packets == 1) {
3889 if (ist->st->nb_side_data)
3890 av_packet_split_side_data(&pkt);
3891 for (i = 0; i < ist->st->nb_side_data; i++) {
3892 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not duplicate side data the packet already carries. */
3895 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is handled by autorotate; skip copying it. */
3897 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3900 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3904 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated file-level timestamp offset, then -itsscale. */
3908 if (pkt.dts != AV_NOPTS_VALUE)
3909 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3910 if (pkt.pts != AV_NOPTS_VALUE)
3911 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3913 if (pkt.pts != AV_NOPTS_VALUE)
3914 pkt.pts *= ist->ts_scale;
3915 if (pkt.dts != AV_NOPTS_VALUE)
3916 pkt.dts *= ist->ts_scale;
/* --- Inter-file discontinuity: first dts of this stream vs. the last
 * timestamp seen from this file --- */
3918 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3919 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3920 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3921 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3922 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3923 int64_t delta = pkt_dts - ifile->last_ts;
3924 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3925 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3926 ifile->ts_offset -= delta;
3927 av_log(NULL, AV_LOG_DEBUG,
3928 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3929 delta, ifile->ts_offset);
3930 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3931 if (pkt.pts != AV_NOPTS_VALUE)
3932 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps of later iterations by the accumulated
 * file duration and track the min/max pts seen. */
3936 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3937 if (pkt.pts != AV_NOPTS_VALUE) {
3938 pkt.pts += duration;
3939 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3940 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3943 if (pkt.dts != AV_NOPTS_VALUE)
3944 pkt.dts += duration;
/* --- Per-stream discontinuity: packet dts vs. the dts predicted from
 * the previous packet (next_dts) --- */
3946 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3947 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3948 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3949 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3951 int64_t delta = pkt_dts - ist->next_dts;
/* Formats prone to timestamp jumps (MPEG-TS etc.): absorb the jump into
 * ts_offset so output timestamps stay continuous. */
3952 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3953 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3954 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3955 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3956 ifile->ts_offset -= delta;
3957 av_log(NULL, AV_LOG_DEBUG,
3958 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3959 delta, ifile->ts_offset);
3960 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3961 if (pkt.pts != AV_NOPTS_VALUE)
3962 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-discont formats: timestamps wildly outside the error threshold are
 * dropped (set to AV_NOPTS_VALUE) rather than corrected. */
3965 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3966 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3967 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3968 pkt.dts = AV_NOPTS_VALUE;
3970 if (pkt.pts != AV_NOPTS_VALUE){
3971 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3972 delta = pkt_pts - ist->next_dts;
3973 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3974 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3975 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3976 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for inter-file discontinuity checks. */
3982 if (pkt.dts != AV_NOPTS_VALUE)
3983 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Verbose trace of the timestamps after all corrections. */
3986 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3987 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3988 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3989 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3990 av_ts2str(input_files[ist->file_index]->ts_offset),
3991 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video streams alive, then decode or copy the packet. */
3994 sub2video_heartbeat(ist, pkt.pts);
3996 process_input_packet(ist, &pkt, 0);
3999 av_packet_unref(&pkt);
/**
4005 * Perform a step of transcoding for the specified filter graph.
4007 * @param[in] graph filter graph to consider
4008 * @param[out] best_ist input stream where a frame would allow to continue
4009 * @return 0 for success, <0 for error
4011 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4014 int nb_requests, nb_requests_max = 0;
4015 InputFilter *ifilter;
/* Ask the graph for its oldest queued frame.  Success means a frame is
 * ready to reap; EOF means the graph is drained and its outputs close. */
4019 ret = avfilter_graph_request_oldest(graph->graph);
4021 return reap_filters(0);
4023 if (ret == AVERROR_EOF) {
4024 ret = reap_filters(1);
4025 for (i = 0; i < graph->nb_outputs; i++)
4026 close_output_stream(graph->outputs[i]->ost);
4029 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs input.  Pick the input stream whose buffersrc
 * reported the most failed requests, skipping files that are stalled
 * (eagain) or finished (eof_reached). */
4032 for (i = 0; i < graph->nb_inputs; i++) {
4033 ifilter = graph->inputs[i];
4035 if (input_files[ist->file_index]->eagain ||
4036 input_files[ist->file_index]->eof_reached)
4038 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4039 if (nb_requests > nb_requests_max) {
4040 nb_requests_max = nb_requests;
/* No feedable input found: mark all of the graph's outputs unavailable
 * so the scheduler moves on. */
4046 for (i = 0; i < graph->nb_outputs; i++)
4047 graph->outputs[i]->ost->unavailable = 1;
/**
4053 * Run a single step of transcoding.
4055 * @return 0 for success, <0 for error
4057 static int transcode_step(void)
/* Pick the output stream that is furthest behind; NULL means every
 * stream is currently unavailable or finished. */
4063 ost = choose_output();
4070 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed output: let the filtergraph decide which input to feed. */
4075 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Directly-fed output (stream copy / no lavfi): its source stream index
 * must be valid. */
4080 av_assert0(ost->source_index >= 0);
4081 ist = input_streams[ost->source_index];
4084 ret = process_input(ist->file_index);
4085 if (ret == AVERROR(EAGAIN)) {
/* The whole input file is stalled: flag this output unavailable too. */
4086 if (input_files[ist->file_index]->eagain)
4087 ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4092 return ret == AVERROR_EOF ? 0 : ret;
4094 return reap_filters(0);
/*
4098 * The following code is the main loop of the file converter
4100 static int transcode(void)
4103 AVFormatContext *os;
4106 int64_t timer_start;
4107 int64_t total_packets_written = 0;
4109 ret = transcode_init();
4113 if (stdin_interaction) {
4114 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4117 timer_start = av_gettime_relative();
/* Spawn demuxer threads when there is more than one input file. */
4120 if ((ret = init_input_threads()) < 0)
/* --- Main loop: one transcode_step() per iteration until all outputs
 * are done or the user/a signal stops us --- */
4124 while (!received_sigterm) {
4125 int64_t cur_time= av_gettime_relative();
4127 /* if 'q' pressed, exits */
4128 if (stdin_interaction)
4129 if (check_keyboard_interaction(cur_time) < 0)
4132 /* check if there's any stream where output is still needed */
4133 if (!need_output()) {
4134 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4138 ret = transcode_step();
4139 if (ret < 0 && ret != AVERROR_EOF) {
4141 av_strerror(ret, errbuf, sizeof(errbuf));
4143 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4147 /* dump report by using the output first video and audio streams */
4148 print_report(0, timer_start, cur_time);
4151 free_input_threads();
4154 /* at the end of stream, we must flush the decoder buffers */
4155 for (i = 0; i < nb_input_streams; i++) {
4156 ist = input_streams[i];
4157 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4158 process_input_packet(ist, NULL, 0);
4165 /* write the trailer if needed and close file */
4166 for (i = 0; i < nb_output_files; i++) {
4167 os = output_files[i]->ctx;
4168 if ((ret = av_write_trailer(os)) < 0) {
4169 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4175 /* dump report by using the first video and audio streams */
4176 print_report(1, timer_start, av_gettime_relative());
4178 /* close each encoder */
4179 for (i = 0; i < nb_output_streams; i++) {
4180 ost = output_streams[i];
4181 if (ost->encoding_needed) {
4182 av_freep(&ost->enc_ctx->stats_in);
4184 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4187 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4188 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4192 /* close each decoder */
4193 for (i = 0; i < nb_input_streams; i++) {
4194 ist = input_streams[i];
4195 if (ist->decoding_needed) {
4196 avcodec_close(ist->dec_ctx);
4197 if (ist->hwaccel_uninit)
4198 ist->hwaccel_uninit(ist->dec_ctx);
4202 av_buffer_unref(&hw_device_ctx);
/* --- Cleanup path (also reached on error): release per-output-stream
 * resources; the threads are freed again here for the error path. --- */
4209 free_input_threads();
4212 if (output_streams) {
4213 for (i = 0; i < nb_output_streams; i++) {
4214 ost = output_streams[i];
4217 if (fclose(ost->logfile))
4218 av_log(NULL, AV_LOG_ERROR,
4219 "Error closing logfile, loss of information possible: %s\n",
4220 av_err2str(AVERROR(errno)));
4221 ost->logfile = NULL;
4223 av_freep(&ost->forced_kf_pts);
4224 av_freep(&ost->apad);
4225 av_freep(&ost->disposition);
4226 av_dict_free(&ost->encoder_opts);
4227 av_dict_free(&ost->sws_dict);
4228 av_dict_free(&ost->swr_opts);
4229 av_dict_free(&ost->resample_opts);
/* Return the process's user CPU time in microseconds, falling back to a
 * monotonic wall clock where neither getrusage nor GetProcessTimes is
 * available.  Used for the -benchmark report. */
4237 static int64_t getutime(void)
4240 struct rusage rusage;
4242 getrusage(RUSAGE_SELF, &rusage);
4243 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4244 #elif HAVE_GETPROCESSTIMES
4246 FILETIME c, e, k, u;
4247 proc = GetCurrentProcess();
4248 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; combine the halves and divide to get us. */
4249 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4251 return av_gettime_relative();
/* Return the process's peak memory usage in bytes (maxrss on POSIX,
 * PeakPagefileUsage on Windows) for the -benchmark report. */
4255 static int64_t getmaxrss(void)
4257 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4258 struct rusage rusage;
4259 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux; convert to bytes. */
4260 return (int64_t)rusage.ru_maxrss * 1024;
4261 #elif HAVE_GETPROCESSMEMORYINFO
4263 PROCESS_MEMORY_COUNTERS memcounters;
4264 proc = GetCurrentProcess();
4265 memcounters.cb = sizeof(memcounters);
4266 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4267 return memcounters.PeakPagefileUsage;
/* No-op log callback installed by the undocumented -d (daemon) flag to
 * silence all av_log output. */
4273 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse the command line, run
 * transcode(), print benchmark/error statistics, and exit through
 * exit_program() so ffmpeg_cleanup runs. */
4277 int main(int argc, char **argv)
4284 register_exit(ffmpeg_cleanup);
4286 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4288 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4289 parse_loglevel(argc, argv, options);
/* Undocumented "-d" as first argument: run quietly as a daemon. */
4291 if(argc>1 && !strcmp(argv[1], "-d")){
4293 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter and (de)muxer, then networking. */
4298 avcodec_register_all();
4300 avdevice_register_all();
4302 avfilter_register_all();
4304 avformat_network_init();
4306 show_banner(argc, argv, options);
4310 /* parse options and open all input/output files */
4311 ret = ffmpeg_parse_options(argc, argv);
4315 if (nb_output_files <= 0 && nb_input_files == 0) {
4317 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4321 /* file converter / grab */
4322 if (nb_output_files <= 0) {
4323 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4327 // if (nb_input_files == 0) {
4328 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode for the -benchmark report. */
4332 current_time = ti = getutime();
4333 if (transcode() < 0)
4335 ti = getutime() - ti;
4337 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4339 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4340 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail when the decode-error fraction is too high. */
4341 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit code 255 signals interruption by the user/a signal. */
4344 exit_program(received_nb_signals ? 255 : main_return_code);
4345 return main_return_code;