2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by the cmdutils option/banner machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* -vstats output file; opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions.
 * NOTE(review): initializer list elided in this excerpt. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Running totals reported in the status line. */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
/* Reference time for update_benchmark() deltas. */
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated on first use. */
135 static uint8_t *subtitle_out;
/* Global stream/file tables shared with ffmpeg_opt.c / ffmpeg_filter.c. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty set. */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
199 dst += r->y * dst_linesize + r->x * 4;
201 pal = (uint32_t *)r->data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->linesize[0];
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle (or a blank heartbeat when sub == NULL) onto the
 * sub2video canvas and push it into the filter graph.
 * NOTE(review): this excerpt elides several lines of the original function
 * (locals, the sub==NULL branch, error-return paths); text kept verbatim. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* Display window rescaled from AV_TIME_BASE to the stream time base. */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* Heartbeat case: reuse the previous end time as the new frame's pts. */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* Blit every subtitle rectangle onto the fresh canvas. */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to keep filter graphs fed while video
 * frames arrive without new subtitles.
 * NOTE(review): `continue` statements and the nb_reqs gate around
 * sub2video_push_ref() are elided in this excerpt; text kept verbatim. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Current display window expired (or no canvas yet): refresh via update. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* Count buffersrc underruns to decide whether a push is needed. */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: puts the tty back into the saved state.
 * NOTE(review): the surrounding #if HAVE_TERMIOS_H / restore_tty guard and the
 * public term_exit() wrapper around the av_log line are elided here. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
/* Flushes any pending log output (QUIET level, empty payload). */
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-state flags; volatile because they are written from handlers. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the POSIX-style
 * sigterm_handler so both platforms share one shutdown path.
 * NOTE(review): the switch statement skeleton, CTRL_C_EVENT case, Sleep()
 * call and return statements are elided in this excerpt. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Interior of term_init(): put the controlling tty into raw-ish mode so
 * single keypresses can be read, and install the signal handlers.
 * NOTE(review): the function header, #if guards, oldtty save and restore_tty
 * bookkeeping are elided in this excerpt; text kept verbatim. */
369 if (!run_as_daemon && stdin_interaction) {
371 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control, canonical mode and echo. */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded also triggers a graceful shutdown. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Returns the next pending byte from stdin, or a negative/zero value when
 * nothing is available.  POSIX path polls with select(); Windows path peeks
 * the console/pipe handle.
 * NOTE(review): fd_set/timeval setup, the read() call and all return paths
 * are elided in this excerpt; text kept verbatim. */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode failing means stdin is a pipe, not a console. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown registered with register_exit(): frees every filtergraph,
 * output file/stream, input file/stream and module-level buffer, then prints
 * the exit status.  `ret` is the process exit code being propagated.
 * NOTE(review): various closing braces, NULL guards (e.g. `if (of)`,
 * `if (vstats_file)`) and the final ffmpeg_exited store are elided in this
 * excerpt; text kept verbatim. */
459 static void ffmpeg_cleanup(int ret)
/* With -benchmark, report peak resident set size in kB. */
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free all filtergraphs and their input/output descriptors. */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* Close and free every output muxer context. */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* Free per-output-stream state: bitstream filters, frames, dicts, parser. */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
509 for (j = 0; j < ost->nb_bitstream_filters; j++)
510 av_bsf_free(&ost->bsf_ctx[j]);
511 av_freep(&ost->bsf_ctx);
512 av_freep(&ost->bsf_extradata_updated);
514 av_frame_free(&ost->filtered_frame);
515 av_frame_free(&ost->last_frame);
516 av_dict_free(&ost->encoder_opts);
518 av_parser_close(ost->parser);
519 avcodec_free_context(&ost->parser_avctx);
521 av_freep(&ost->forced_keyframes);
522 av_expr_free(ost->forced_keyframes_pexpr);
523 av_freep(&ost->avfilter);
524 av_freep(&ost->logfile_prefix);
526 av_freep(&ost->audio_channels_map);
527 ost->audio_channels_mapped = 0;
529 av_dict_free(&ost->sws_dict);
531 avcodec_free_context(&ost->enc_ctx);
532 avcodec_parameters_free(&ost->ref_par);
534 av_freep(&output_streams[i]);
/* Stop reader threads before tearing down input contexts. */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
553 av_freep(&ist->dts_buffer);
555 avcodec_free_context(&ist->dec_ctx);
557 av_freep(&input_streams[i]);
/* fclose flushes buffered stats; report if that flush fails. */
561 if (fclose(vstats_file))
562 av_log(NULL, AV_LOG_ERROR,
563 "Error closing vstats file, loss of information possible: %s\n",
564 av_err2str(AVERROR(errno)));
566 av_freep(&vstats_filename);
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
605 static void abort_codec_experimental(AVCodec *c, int encoder)
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final stage before the muxer: sanitize timestamps, update statistics and
 * hand the packet to av_interleaved_write_frame().  Takes ownership of pkt
 * (always unreferenced before returning).
 * NOTE(review): several closing braces, the debug_ts guard around the
 * "muxer <-" log, and the exit_ts check before "aborting." are elided in
 * this excerpt; text kept verbatim. */
636 static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVStream *st = ost->st;
/* -vsync drop / negative -async discard timestamps entirely. */
641 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
642 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
643 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
646 * Audio encoders may split the packets -- #frames in != #packets out.
647 * But there is no reordering, so we can limit the number of output packets
648 * by simply dropping them here.
649 * Counting encoded video frames needs to be done separately because of
650 * reordering, see do_video_out()
652 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
653 if (ost->frame_number >= ost->max_frames) {
654 av_packet_unref(pkt);
/* Pull encoder quality/error side data for the stats line. */
659 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
661 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
663 ost->quality = sd ? AV_RL32(sd) : -1;
664 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
666 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
668 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force per-frame duration from the configured frame rate. */
673 if (ost->frame_rate.num && ost->is_cfr) {
674 if (pkt->duration > 0)
675 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
676 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* Timestamp sanitization for muxers that need timestamps. */
681 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
682 if (pkt->dts != AV_NOPTS_VALUE &&
683 pkt->pts != AV_NOPTS_VALUE &&
684 pkt->dts > pkt->pts) {
685 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
687 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}. */
689 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
690 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
691 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing dts per stream. */
693 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
694 pkt->dts != AV_NOPTS_VALUE &&
695 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
696 ost->last_mux_dts != AV_NOPTS_VALUE) {
697 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
698 if (pkt->dts < max) {
699 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
700 av_log(s, loglevel, "Non-monotonous DTS in output stream "
701 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
702 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
704 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
707 av_log(s, loglevel, "changing to %"PRId64". This may result "
708 "in incorrect timestamps in the output file.\n",
710 if (pkt->pts >= pkt->dts)
711 pkt->pts = FFMAX(pkt->pts, max);
716 ost->last_mux_dts = pkt->dts;
718 ost->data_size += pkt->size;
719 ost->packets_written++;
721 pkt->stream_index = ost->index;
724 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
725 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
726 av_get_media_type_string(ost->enc_ctx->codec_type),
727 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
728 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
733 ret = av_interleaved_write_frame(s, pkt);
/* Muxing failure: record it and shut every output stream down. */
735 print_error("av_interleaved_write_frame()", ret);
736 main_return_code = 1;
737 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
739 av_packet_unref(pkt);
742 static void close_output_stream(OutputStream *ost)
744 OutputFile *of = output_files[ost->file_index];
746 ost->finished |= ENCODER_FINISHED;
748 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
749 of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through the stream's bitstream-filter chain (if any) and
 * then to write_packet().  Consumes pkt in all paths.
 * NOTE(review): the idx bookkeeping, the while(idx) loop skeleton and
 * several goto/error branches are elided in this excerpt; text verbatim. */
753 static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
757 /* apply the output bitstream filters, if any */
758 if (ost->nb_bitstream_filters) {
/* Feed the packet into the head of the bsf chain. */
761 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
767 /* get a packet from the previous filter up the chain */
768 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
769 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
770 * the api states this shouldn't happen after init(). Propagate it here to the
771 * muxer and to the next filters in the chain to workaround this.
772 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
773 * par_out->extradata and adapt muxers accordingly to get rid of this. */
774 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
775 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
778 ost->bsf_extradata_updated[idx - 1] |= 1;
/* EAGAIN: this filter needs more input; move back up the chain. */
780 if (ret == AVERROR(EAGAIN)) {
787 /* send it to the next filter down the chain or to the muxer */
788 if (idx < ost->nb_bitstream_filters) {
789 /* HACK/FIXME! - See above */
790 if (!(ost->bsf_extradata_updated[idx] & 2)) {
791 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
794 ost->bsf_extradata_updated[idx] |= 2;
796 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
801 write_packet(s, pkt, ost);
/* No bitstream filters: write the packet directly. */
804 write_packet(s, pkt, ost);
807 if (ret < 0 && ret != AVERROR_EOF) {
808 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
809 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
815 static int check_recording_time(OutputStream *ost)
817 OutputFile *of = output_files[ost->file_index];
819 if (of->recording_time != INT64_MAX &&
820 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
821 AV_TIME_BASE_Q) >= 0) {
822 close_output_stream(ost);
/* Encode one audio frame with the new send/receive API and push each
 * resulting packet through output_packet().
 * NOTE(review): the frame parameter line, goto error handling and receive
 * loop braces are elided in this excerpt; text kept verbatim. */
828 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
831 AVCodecContext *enc = ost->enc_ctx;
835 av_init_packet(&pkt);
/* Stop encoding once -t recording time has been reached. */
839 if (!check_recording_time(ost))
/* Without usable pts, continue the running sample count. */
842 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
843 frame->pts = ost->sync_opts;
844 ost->sync_opts = frame->pts + frame->nb_samples;
845 ost->samples_encoded += frame->nb_samples;
846 ost->frames_encoded++;
848 av_assert0(pkt.size || !pkt.data);
849 update_benchmark(NULL);
851 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
852 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
853 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
854 enc->time_base.num, enc->time_base.den);
857 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
862 ret = avcodec_receive_packet(enc, &pkt);
863 if (ret == AVERROR(EAGAIN))
868 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Packet timestamps move from encoder to stream time base before muxing. */
870 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
873 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
874 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
875 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
876 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
879 output_packet(s, &pkt, ost);
884 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles are
 * encoded twice: one packet draws the subtitle, a second clears it.
 * NOTE(review): remaining parameters, several locals, exit paths and closing
 * braces are elided in this excerpt; text kept verbatim. */
888 static void do_subtitle_out(AVFormatContext *s,
893 int subtitle_out_max_size = 1024 * 1024;
894 int subtitle_out_size, nb, i;
899 if (sub->pts == AV_NOPTS_VALUE) {
900 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the module-level output buffer. */
909 subtitle_out = av_malloc(subtitle_out_max_size);
911 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
916 /* Note: DVB subtitle need one packet to draw them and one other
917 packet to clear them */
918 /* XXX: signal it in the codec context ? */
919 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
924 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
926 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
927 pts -= output_files[ost->file_index]->start_time;
928 for (i = 0; i < nb; i++) {
929 unsigned save_num_rects = sub->num_rects;
931 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
932 if (!check_recording_time(ost))
936 // start_display_time is required to be 0
937 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
938 sub->end_display_time -= sub->start_display_time;
939 sub->start_display_time = 0;
943 ost->frames_encoded++;
945 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
946 subtitle_out_max_size, sub);
/* Restore the rect count (the clear pass encodes with num_rects = 0). */
948 sub->num_rects = save_num_rects;
949 if (subtitle_out_size < 0) {
950 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
954 av_init_packet(&pkt);
955 pkt.data = subtitle_out;
956 pkt.size = subtitle_out_size;
957 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
958 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
959 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
960 /* XXX: the pts correction is handled here. Maybe handling
961 it in the codec would be better */
/* 90 = 90kHz MPEG clock ticks per millisecond of display time. */
963 pkt.pts += 90 * sub->start_display_time;
965 pkt.pts += 90 * sub->end_display_time;
968 output_packet(s, &pkt, ost);
/* Video frame sync + encode: decides how many times to emit (dup) or whether
 * to drop the incoming filtered frame according to the active -vsync mode,
 * then encodes each emitted frame and muxes the packets.
 * NOTE(review): this excerpt elides many lines of the original (parameter
 * lines, case labels, braces, debug_ts guards, goto targets); all retained
 * text is verbatim. */
972 static void do_video_out(AVFormatContext *s,
974 AVFrame *next_picture,
977 int ret, format_video_sync;
979 AVCodecContext *enc = ost->enc_ctx;
980 AVCodecParameters *mux_par = ost->st->codecpar;
981 int nb_frames, nb0_frames, i;
982 double delta, delta0;
985 InputStream *ist = NULL;
986 AVFilterContext *filter = ost->filter->filter;
988 if (ost->source_index >= 0)
989 ist = input_streams[ost->source_index];
/* Derive the nominal frame duration (in encoder time-base units). */
991 if (filter->inputs[0]->frame_rate.num > 0 &&
992 filter->inputs[0]->frame_rate.den > 0)
993 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
995 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
996 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
998 if (!ost->filters_script &&
1002 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1003 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush call: repeat the recent duplication pattern (median of last 3). */
1006 if (!next_picture) {
1008 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1009 ost->last_nb0_frames[1],
1010 ost->last_nb0_frames[2]);
1012 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1013 delta = delta0 + duration;
1015 /* by default, we output a single frame */
1016 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete sync mode for this muxer. */
1019 format_video_sync = video_sync_method;
1020 if (format_video_sync == VSYNC_AUTO) {
1021 if(!strcmp(s->oformat->name, "avi")) {
1022 format_video_sync = VSYNC_VFR;
1024 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1026 && format_video_sync == VSYNC_CFR
1027 && input_files[ist->file_index]->ctx->nb_streams == 1
1028 && input_files[ist->file_index]->input_ts_offset == 0) {
1029 format_video_sync = VSYNC_VSCFR;
1031 if (format_video_sync == VSYNC_CFR && copy_ts) {
1032 format_video_sync = VSYNC_VSCFR;
1035 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clamp frames that arrive before their slot in timestamp-driven modes. */
1039 format_video_sync != VSYNC_PASSTHROUGH &&
1040 format_video_sync != VSYNC_DROP) {
1041 if (delta0 < -0.6) {
1042 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1044 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1045 sync_ipts = ost->sync_opts;
/* Per-mode decision on drop/duplicate counts. */
1050 switch (format_video_sync) {
1052 if (ost->frame_number == 0 && delta0 >= 0.5) {
1053 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1056 ost->sync_opts = lrint(sync_ipts);
1059 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1060 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1062 } else if (delta < -1.1)
1064 else if (delta > 1.1) {
1065 nb_frames = lrintf(delta);
1067 nb0_frames = lrintf(delta0 - 0.6);
1073 else if (delta > 0.6)
1074 ost->sync_opts = lrint(sync_ipts);
1077 case VSYNC_PASSTHROUGH:
1078 ost->sync_opts = lrint(sync_ipts);
/* Never exceed -frames and remember the dup pattern for flush time. */
1085 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1086 nb0_frames = FFMIN(nb0_frames, nb_frames);
1088 memmove(ost->last_nb0_frames + 1,
1089 ost->last_nb0_frames,
1090 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1091 ost->last_nb0_frames[0] = nb0_frames;
1093 if (nb0_frames == 0 && ost->last_dropped) {
1095 av_log(NULL, AV_LOG_VERBOSE,
1096 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1097 ost->frame_number, ost->st->index, ost->last_frame->pts);
1099 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1100 if (nb_frames > dts_error_threshold * 30) {
1101 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1105 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1106 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1108 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1110 /* duplicates frame if needed */
1111 for (i = 0; i < nb_frames; i++) {
1112 AVFrame *in_picture;
1113 av_init_packet(&pkt);
/* First nb0_frames iterations re-emit the previous frame. */
1117 if (i < nb0_frames && ost->last_frame) {
1118 in_picture = ost->last_frame;
1120 in_picture = next_picture;
1125 in_picture->pts = ost->sync_opts;
1128 if (!check_recording_time(ost))
1130 if (ost->frame_number >= ost->max_frames)
1134 #if FF_API_LAVF_FMT_RAWPICTURE
/* Legacy rawpicture path: muxer accepts the AVPicture pointer directly. */
1135 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1136 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1137 /* raw pictures are written as AVPicture structure to
1138 avoid any copies. We support temporarily the older
1140 if (in_picture->interlaced_frame)
1141 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1144 pkt.data = (uint8_t *)in_picture;
1145 pkt.size = sizeof(AVPicture);
1146 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1147 pkt.flags |= AV_PKT_FLAG_KEY;
1149 output_packet(s, &pkt, ost);
1153 int forced_keyframe = 0;
1156 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1157 ost->top_field_first >= 0)
1158 in_picture->top_field_first = !!ost->top_field_first;
1160 if (in_picture->interlaced_frame) {
1161 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1162 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1164 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1166 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1168 in_picture->quality = enc->global_quality;
1169 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source". */
1171 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1172 in_picture->pts * av_q2d(enc->time_base) : NAN;
1173 if (ost->forced_kf_index < ost->forced_kf_count &&
1174 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1175 ost->forced_kf_index++;
1176 forced_keyframe = 1;
1177 } else if (ost->forced_keyframes_pexpr) {
1179 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1180 res = av_expr_eval(ost->forced_keyframes_pexpr,
1181 ost->forced_keyframes_expr_const_values, NULL);
1182 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1183 ost->forced_keyframes_expr_const_values[FKF_N],
1184 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1185 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1186 ost->forced_keyframes_expr_const_values[FKF_T],
1187 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1190 forced_keyframe = 1;
1191 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1192 ost->forced_keyframes_expr_const_values[FKF_N];
1193 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1194 ost->forced_keyframes_expr_const_values[FKF_T];
1195 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1198 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1199 } else if ( ost->forced_keyframes
1200 && !strncmp(ost->forced_keyframes, "source", 6)
1201 && in_picture->key_frame==1) {
1202 forced_keyframe = 1;
1205 if (forced_keyframe) {
1206 in_picture->pict_type = AV_PICTURE_TYPE_I;
1207 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1210 update_benchmark(NULL);
1212 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1213 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1214 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1215 enc->time_base.num, enc->time_base.den);
1218 ost->frames_encoded++;
/* Encode with the send/receive API and drain all ready packets. */
1220 ret = avcodec_send_frame(enc, in_picture);
1225 ret = avcodec_receive_packet(enc, &pkt);
1226 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1227 if (ret == AVERROR(EAGAIN))
1233 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1234 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1235 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1236 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1239 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1240 pkt.pts = ost->sync_opts;
1242 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1245 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1246 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1247 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1248 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1251 frame_size = pkt.size;
1252 output_packet(s, &pkt, ost);
1254 /* if two pass, output log */
1255 if (ost->logfile && enc->stats_out) {
1256 fprintf(ost->logfile, "%s", enc->stats_out);
1262 * For video, number of frames in == number of packets out.
1263 * But there may be reordering, so we can't throw away frames on encoder
1264 * flush, we need to limit them here, before they go into encoder.
1266 ost->frame_number++;
1268 if (vstats_filename && frame_size)
1269 do_video_stats(ost, frame_size);
/* Keep a reference to the emitted frame for future duplication. */
1272 if (!ost->last_frame)
1273 ost->last_frame = av_frame_alloc();
1274 av_frame_unref(ost->last_frame);
1275 if (next_picture && ost->last_frame)
1276 av_frame_ref(ost->last_frame, next_picture);
1278 av_frame_free(&ost->last_frame);
1282 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1286 static double psnr(double d)
1288 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quality, PSNR, sizes, bitrates)
 * to the -vstats file for this video stream.
 * NOTE(review): the fopen error check, enc assignment and the ti1 floor
 * clamp are elided in this excerpt; text kept verbatim. */
1291 static void do_video_stats(OutputStream *ost, int frame_size)
1293 AVCodecContext *enc;
1295 double ti1, bitrate, avg_bitrate;
1297 /* this is executed just the first time do_video_stats is called */
1299 vstats_file = fopen(vstats_filename, "w");
1307 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1308 frame_number = ost->st->nb_frames;
1309 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1310 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR column only when the encoder was asked to compute error values. */
1312 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1313 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1315 fprintf(vstats_file,"f_size= %6d ", frame_size);
1316 /* compute pts value */
1317 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1321 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1322 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1323 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1324 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1325 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark this output stream -- and every other stream of the same output
 * file -- as completely finished (no further encoding or muxing).
 */
static void finish_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
    /* finishing one stream finishes the whole containing output file */
    for (i = 0; i < of->ctx->nb_streams; i++)
        output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1343 * Get and encode new output from any of the filtergraphs, without causing
1346 * @return 0 for success, <0 for severe errors
/*
 * Pull every frame currently buffered in the filtergraph sinks and send it
 * to the matching encoder (video or audio).
 * @param flush  if nonzero, an EOF from the sink triggers a final
 *               do_video_out(NULL) to drain the video encoder
 * @return 0 for success, <0 for severe errors
 * NOTE(review): a number of lines (loop heads, break/continue statements,
 * closing braces) are elided in this excerpt.
 */
static int reap_filters(int flush)
    AVFrame *filtered_frame = NULL;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        filter = ost->filter->filter;

        /* lazily allocate the reusable per-stream frame */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        filtered_frame = ost->filtered_frame;

        double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
        ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                            AV_BUFFERSINK_FLAG_NO_REQUEST);
        /* EAGAIN (nothing buffered) and EOF are expected; anything else is logged */
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_WARNING,
                   "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
        } else if (flush && ret == AVERROR_EOF) {
            if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
        if (ost->finished) {
            av_frame_unref(filtered_frame);
        if (filtered_frame->pts != AV_NOPTS_VALUE) {
            int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
            AVRational tb = enc->time_base;
            /* widen the timebase denominator so float_pts keeps extra
             * fractional precision beyond the integer pts */
            int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
            tb.den <<= extra_bits;
            av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
            float_pts /= 1 << extra_bits;
            // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
            float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

            filtered_frame->pts =
                av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
        //if (ost->source_index >= 0)
        //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

        switch (filter->inputs[0]->type) {
        case AVMEDIA_TYPE_VIDEO:
            if (!ost->frame_aspect_ratio.num)
                enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

            av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                   av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);

            do_video_out(of->ctx, ost, filtered_frame, float_pts);
        case AVMEDIA_TYPE_AUDIO:
            /* the encoder cannot follow a mid-stream channel-count change */
            if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                enc->channels != av_frame_get_channels(filtered_frame)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
            do_audio_out(of->ctx, ost, filtered_frame);
            // TODO support subtitle filters

        av_frame_unref(filtered_frame);
1443 static void print_final_stats(int64_t total_size)
1445 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1446 uint64_t subtitle_size = 0;
1447 uint64_t data_size = 0;
1448 float percent = -1.0;
1452 for (i = 0; i < nb_output_streams; i++) {
1453 OutputStream *ost = output_streams[i];
1454 switch (ost->enc_ctx->codec_type) {
1455 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1456 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1457 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1458 default: other_size += ost->data_size; break;
1460 extra_size += ost->enc_ctx->extradata_size;
1461 data_size += ost->data_size;
1462 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1463 != AV_CODEC_FLAG_PASS1)
1467 if (data_size && total_size>0 && total_size >= data_size)
1468 percent = 100.0 * (total_size - data_size) / data_size;
1470 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1471 video_size / 1024.0,
1472 audio_size / 1024.0,
1473 subtitle_size / 1024.0,
1474 other_size / 1024.0,
1475 extra_size / 1024.0);
1477 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1479 av_log(NULL, AV_LOG_INFO, "unknown");
1480 av_log(NULL, AV_LOG_INFO, "\n");
1482 /* print verbose per-stream stats */
1483 for (i = 0; i < nb_input_files; i++) {
1484 InputFile *f = input_files[i];
1485 uint64_t total_packets = 0, total_size = 0;
1487 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1488 i, f->ctx->filename);
1490 for (j = 0; j < f->nb_streams; j++) {
1491 InputStream *ist = input_streams[f->ist_index + j];
1492 enum AVMediaType type = ist->dec_ctx->codec_type;
1494 total_size += ist->data_size;
1495 total_packets += ist->nb_packets;
1497 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1498 i, j, media_type_string(type));
1499 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1500 ist->nb_packets, ist->data_size);
1502 if (ist->decoding_needed) {
1503 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1504 ist->frames_decoded);
1505 if (type == AVMEDIA_TYPE_AUDIO)
1506 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1507 av_log(NULL, AV_LOG_VERBOSE, "; ");
1510 av_log(NULL, AV_LOG_VERBOSE, "\n");
1513 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1514 total_packets, total_size);
1517 for (i = 0; i < nb_output_files; i++) {
1518 OutputFile *of = output_files[i];
1519 uint64_t total_packets = 0, total_size = 0;
1521 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1522 i, of->ctx->filename);
1524 for (j = 0; j < of->ctx->nb_streams; j++) {
1525 OutputStream *ost = output_streams[of->ost_index + j];
1526 enum AVMediaType type = ost->enc_ctx->codec_type;
1528 total_size += ost->data_size;
1529 total_packets += ost->packets_written;
1531 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1532 i, j, media_type_string(type));
1533 if (ost->encoding_needed) {
1534 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1535 ost->frames_encoded);
1536 if (type == AVMEDIA_TYPE_AUDIO)
1537 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1538 av_log(NULL, AV_LOG_VERBOSE, "; ");
1541 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1542 ost->packets_written, ost->data_size);
1544 av_log(NULL, AV_LOG_VERBOSE, "\n");
1547 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1548 total_packets, total_size);
1550 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1551 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1553 av_log(NULL, AV_LOG_WARNING, "\n");
1555 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Print the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr or the log, and write machine-readable key=value progress to the
 * -progress AVIO context when one is open.
 * Rate-limited to one update per 500ms unless this is the final report.
 * NOTE(review): many lines (returns, else branches, closing braces, time
 * splitting into hours/mins) are elided in this excerpt.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
    AVBPrint buf_script;
    AVFormatContext *oc;
    AVCodecContext *enc;
    int frame_number, vid, i;
    /* running maximum of per-stream end pts, in AV_TIME_BASE units */
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;

    if (!print_stats && !is_last_report && !progress_avio)

    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        /* throttle: at most one report every 500ms */
        if ((cur_time - last_time) < 500000)
        last_time = cur_time;

    t = (cur_time-timer_start) / 1000000.0;

    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* "fps < 9.95" selects the precision (1 decimal below 10 fps) */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                /* one hex digit per QP bucket, log-scaled count */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            nb_frames_drop += ost->last_dropped;

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));

        print_final_stats(total_size);
/*
 * Drain every active encoder at end of stream: send a NULL (flush) frame,
 * then receive and mux the remaining delayed packets until EOF.
 * NOTE(review): several lines (continue statements, desc assignments,
 * loop/brace structure) are elided in this excerpt.
 */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)

        /* raw-PCM-style audio encoders (frame_size <= 1) buffer nothing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)

        /* NULL frame enters drain mode (send/receive API) */
        avcodec_send_frame(enc, NULL);

            const char *desc = NULL;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
            case AVMEDIA_TYPE_VIDEO:

                av_init_packet(&pkt);

                update_benchmark(NULL);
                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0 && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                if (ret == AVERROR_EOF) {
                if (ost->finished & MUXER_FINISHED) {
                    av_packet_unref(&pkt);
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                output_packet(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
/*
 * Check whether a packet from ist should be written into ost at this time
 * NOTE(review): the return statements are elided in this excerpt; visible
 * conditions reject packets from a non-matching source stream and packets
 * before the output file's start time.
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    /* this output stream is fed by a different input stream */
    if (ost->source_index != ist_index)

    /* packet lies before the requested output start time (-ss on output) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale timestamps into the output stream timebase, apply start-time
 * offsets and recording-time limits, optionally run the packet through a
 * parser for formats that need it, then hand it to the muxer.
 * NOTE(review): several lines (returns, else branches, closing braces,
 * some declarations) are elided in this excerpt.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);

    av_init_packet(&opkt);

    /* drop leading non-keyframes unless explicitly requested to keep them */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)

    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))

    /* stop copying once the output recording time (-t) is reached */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);

    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        opkt.pts = AV_NOPTS_VALUE;

    /* no packet dts: fall back to the input stream's running dts estimate */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
            duration = ist->dec_ctx->frame_size;
        /* rescale via sample counts to avoid timestamp drift on audio */
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
            /* parser produced a new buffer; wrap it so it gets freed */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;

    output_packet(of->ctx, &opkt, ost);
/*
 * If the decoder did not report a channel layout, guess the default layout
 * for its channel count (bounded by -guess_layout_max) and warn about it.
 * NOTE(review): the return statements are elided in this excerpt;
 * presumably it returns nonzero on success -- confirm against callers.
 */
int guess_input_channel_layout(InputStream *ist)
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* refuse to guess above the user-configured channel-count limit */
        if (dec->channels > ist->guess_layout_max)
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keep decode success/failure statistics and, with -xerror, abort on
 * decode errors or corrupt decoded frames.
 * @param ist         may be NULL (e.g. for subtitles with no frame to check)
 * @param got_output  in/out flag: nonzero if the decoder produced a frame
 * @param ret         decoder return code
 * NOTE(review): the exit paths are elided in this excerpt.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)

    if (exit_on_error && *got_output && ist) {
        if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
// There is the following difference: if you got a frame, you must call
// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/*
 * Thin wrapper mapping the send/receive decode API onto the old
 * one-call-per-frame pattern used by decode_audio()/decode_video().
 * NOTE(review): the got_frame assignment and return paths are elided in
 * this excerpt.
 */
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
        ret = avcodec_send_packet(avctx, pkt);
        // In particular, we don't expect AVERROR(EAGAIN), because we read all
        // decoded frames with avcodec_receive_frame() until done.
        if (ret < 0 && ret != AVERROR_EOF)

    ret = avcodec_receive_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Decode one audio packet (or drain with pkt==NULL), validate the result,
 * reconfigure filtergraphs on a format/rate/channel change, fix up the
 * frame pts, and feed the frame into every filter this stream drives.
 * NOTE(review): several lines (returns, goto targets, else branches,
 * closing braces, the sample_rate divisors of next_pts/next_dts) are
 * elided in this excerpt.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable per-stream frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /

    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        /* remember the new parameters so the next change is detected */
        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

    /* pick the timebase the frame pts is expressed in: stream tb for a
       decoder- or packet-provided pts, AV_TIME_BASE_Q for the dts fallback */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    for (i = 0; i < ist->nb_filters; i++) {
        /* ref a copy for every filter but the last, which may consume the
           frame directly */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);

        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/*
 * Decode one video packet (or drain at EOF), reconstruct a best-effort
 * timestamp (using queued dts values when the decoder gives none at EOF),
 * reconfigure filtergraphs on a size/format change, and feed the frame
 * into every filter this stream drives.
 * NOTE(review): several lines (returns, avpkt declaration/initialization,
 * else branches, closing braces) are elided in this excerpt.
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVRational *frame_sample_aspect;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    if (!eof && pkt && pkt->size == 0)

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
        /* queue the dts so it can be recalled when draining at EOF */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

    // The following line may be required in some cases where there is no parser
    // or the parser does not set has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);

    if (!*got_output || ret < 0)

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from a hardware surface when hwaccel is active */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);

    /* at EOF, fall back to the oldest queued dts (see dts_buffer above) */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;

        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");

    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;

        /* ref a copy for every filter but the last, which may consume the
           frame directly */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);

        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF) {
            err = 0; /* ignore */
        } else if (err < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(err));

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally clamp the previous subtitle's
 * duration (-fix_sub_duration), update any sub2video rendering, and send
 * the subtitle to every matching subtitle encoder.
 * NOTE(review): several lines (returns, 'end' declaration, closing
 * braces) are elided in this excerpt.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
            sub2video_flush(ist);

    if (ist->fix_sub_duration) {
        if (ist->prev_sub.got_output) {
            /* cap the previous subtitle's display time at this one's start */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
        /* delay output by one subtitle: emit the previous one now */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);

    sub2video_update(ist, &subtitle);

    if (!subtitle.num_rects)

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);

    avsubtitle_free(&subtitle);
/*
 * Signal EOF to every buffersrc filter fed by this input stream by
 * pushing a NULL frame, allowing the filtergraphs to drain.
 */
2376 static int send_filter_eof(InputStream *ist)
2379 for (i = 0; i < ist->nb_filters; i++) {
/* NULL frame == EOF marker for av_buffersrc_add_frame(). */
2380 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2387 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher: maintains the input stream's running
 * pts/dts estimates, runs the appropriate decoder (audio/video/subtitle)
 * when decoding is needed, sends filter EOF when draining, and performs
 * direct stream copy otherwise.  'no_eof' suppresses the filter-EOF signal
 * (used when looping input).  Returns nonzero while more data is expected.
 * NOTE(review): elided lines in this excerpt; comments cover visible code.
 */
2388 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2392 int eof_reached = 0;
/* First packet seen on this stream: seed dts from stream properties. */
2395 if (!ist->saw_first_ts) {
2396 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2398 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2399 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2400 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2402 ist->saw_first_ts = 1;
2405 if (ist->next_dts == AV_NOPTS_VALUE)
2406 ist->next_dts = ist->dts;
2407 if (ist->next_pts == AV_NOPTS_VALUE)
2408 ist->next_pts = ist->pts;
2412 av_init_packet(&avpkt);
/* Re-anchor the timestamp estimates on the packet's own dts. */
2419 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2420 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2421 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2422 ist->next_pts = ist->pts = ist->dts;
2425 // while we have more to decode or while the decoder did output something on EOF
2426 while (ist->decoding_needed) {
2430 ist->pts = ist->next_pts;
2431 ist->dts = ist->next_dts;
/* Dispatch to the per-media-type decode helper; 'repeating' passes NULL
 * instead of the packet so the decoder is only fed once. */
2433 switch (ist->dec_ctx->codec_type) {
2434 case AVMEDIA_TYPE_AUDIO:
2435 ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output);
2437 case AVMEDIA_TYPE_VIDEO:
2438 ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2439 if (!repeating || !pkt || got_output) {
/* Estimate the frame duration: packet duration if present, else derive
 * from the decoder's framerate and repeat_pict/ticks_per_frame. */
2440 if (pkt && pkt->duration) {
2441 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2442 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2443 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2444 duration = ((int64_t)AV_TIME_BASE *
2445 ist->dec_ctx->framerate.den * ticks) /
2446 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2449 if(ist->dts != AV_NOPTS_VALUE && duration) {
2450 ist->next_dts += duration;
2452 ist->next_dts = AV_NOPTS_VALUE;
2456 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2458 case AVMEDIA_TYPE_SUBTITLE:
2461 ret = transcode_subtitles(ist, &avpkt, &got_output);
2462 if (!pkt && ret >= 0)
2469 if (ret == AVERROR_EOF) {
2475 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2476 ist->file_index, ist->st->index, av_err2str(ret));
2479 // Decoding might not terminate if we're draining the decoder, and
2480 // the decoder keeps returning an error.
2481 // This should probably be considered a libavcodec issue.
2482 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2491 // During draining, we might get multiple output frames in this loop.
2492 // ffmpeg.c does not drain the filter chain on configuration changes,
2493 // which means if we send multiple frames at once to the filters, and
2494 // one of those frames changes configuration, the buffered frames will
2495 // be lost. This can upset certain FATE tests.
2496 // Decode only 1 frame per call on EOF to appease these FATE tests.
2497 // The ideal solution would be to rewrite decoding to use the new
2498 // decoding API in a better way.
2505 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2506 /* except when looping we need to flush but not to send an EOF */
2507 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2508 int ret = send_filter_eof(ist);
2510 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2515 /* handle stream copy */
2516 if (!ist->decoding_needed) {
2517 ist->dts = ist->next_dts;
/* Advance next_dts by the packet's nominal duration per media type. */
2518 switch (ist->dec_ctx->codec_type) {
2519 case AVMEDIA_TYPE_AUDIO:
2520 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2521 ist->dec_ctx->sample_rate;
2523 case AVMEDIA_TYPE_VIDEO:
2524 if (ist->framerate.num) {
2525 // TODO: Remove work-around for c99-to-c89 issue 7
2526 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap to the forced input framerate grid, then step one frame. */
2527 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2528 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2529 } else if (pkt->duration) {
2530 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2531 } else if(ist->dec_ctx->framerate.num != 0) {
2532 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2533 ist->next_dts += ((int64_t)AV_TIME_BASE *
2534 ist->dec_ctx->framerate.den * ticks) /
2535 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2539 ist->pts = ist->dts;
2540 ist->next_pts = ist->next_dts;
/* Copy the packet to every output stream that stream-copies this input. */
2542 for (i = 0; pkt && i < nb_output_streams; i++) {
2543 OutputStream *ost = output_streams[i];
2545 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2548 do_streamcopy(ist, ost, pkt);
2551 return !eof_reached;
/*
 * Build an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to the file given by -sdp_file.
 */
2554 static void print_sdp(void)
2559 AVIOContext *sdp_pb;
2560 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the outputs muxed as RTP; j counts how many were found. */
2564 for (i = 0, j = 0; i < nb_output_files; i++) {
2565 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2566 avc[j] = output_files[i]->ctx;
2574 av_sdp_create(avc, j, sdp, sizeof(sdp));
2576 if (!sdp_filename) {
2577 printf("SDP:\n%s\n", sdp);
2580 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2581 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2583 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2584 avio_closep(&sdp_pb);
2585 av_freep(&sdp_filename);
/*
 * Look up the entry in the global hwaccels[] table whose pixel format
 * matches 'pix_fmt'.  The table is terminated by an entry with a NULL
 * name.
 */
2593 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2596 for (i = 0; hwaccels[i].name; i++)
2597 if (hwaccels[i].pix_fmt == pix_fmt)
2598 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hwaccel format we can actually initialize
 * (honoring the user's -hwaccel selection).  Software formats and
 * unsupported hwaccels are skipped.
 * NOTE(review): some conditional lines are elided in this excerpt.
 */
2602 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2604 InputStream *ist = s->opaque;
2605 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2608 for (p = pix_fmts; *p != -1; p++) {
2609 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2610 const HWAccel *hwaccel;
/* Only hardware-accelerated formats are of interest here. */
2612 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2615 hwaccel = get_hwaccel(*p);
/* Skip if another hwaccel is already active or the user asked for a
 * different one. */
2617 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2618 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2621 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2623 if (ist->hwaccel_id == hwaccel->id) {
2624 av_log(NULL, AV_LOG_FATAL,
2625 "%s hwaccel requested for input stream #%d:%d, "
2626 "but cannot be initialized.\n", hwaccel->name,
2627 ist->file_index, ist->st->index);
2628 return AV_PIX_FMT_NONE;
/* Propagate an existing hardware frames context to the decoder. */
2633 if (ist->hw_frames_ctx) {
2634 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2635 if (!s->hw_frames_ctx)
2636 return AV_PIX_FMT_NONE;
2639 ist->active_hwaccel_id = hwaccel->id;
2640 ist->hwaccel_pix_fmt = *p;
2647 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2649 InputStream *ist = s->opaque;
2651 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2652 return ist->hwaccel_get_buffer(s, frame, flags);
2654 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' when decoding is needed
 * and initialize its timestamp state.  On failure a human-readable
 * message is written into 'error' and a negative AVERROR is returned.
 */
2657 static int init_input_stream(int ist_index, char *error, int error_len)
2660 InputStream *ist = input_streams[ist_index];
2662 if (ist->decoding_needed) {
2663 AVCodec *codec = ist->dec;
/* No decoder was found for this codec id. */
2665 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2666 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2667 return AVERROR(EINVAL);
/* Install our callbacks so get_format/get_buffer can reach the stream. */
2670 ist->dec_ctx->opaque = ist;
2671 ist->dec_ctx->get_format = get_format;
2672 ist->dec_ctx->get_buffer2 = get_buffer;
2673 ist->dec_ctx->thread_safe_callbacks = 1;
2675 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output need end-display-time computation. */
2676 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2677 (ist->decoding_needed & DECODING_FOR_OST)) {
2678 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2679 if (ist->decoding_needed & DECODING_FOR_FILTER)
2680 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2683 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2685 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2686 * audio, and video decoders such as cuvid or mediacodec */
2687 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread count unless the user chose one. */
2689 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2690 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2691 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2692 if (ret == AVERROR_EXPERIMENTAL)
2693 abort_codec_experimental(codec, 0);
2695 snprintf(error, error_len,
2696 "Error while opening decoder for input stream "
2698 ist->file_index, ist->st->index, av_err2str(ret));
/* Complain about any decoder options that were not consumed. */
2701 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet arrives. */
2704 ist->next_pts = AV_NOPTS_VALUE;
2705 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the input stream an output stream is mapped to, if any
 * (source_index < 0 means the output is fed by a complex filtergraph
 * or an attachment rather than a single input stream).
 */
2710 static InputStream *get_input_stream(OutputStream *ost)
2712 if (ost->source_index >= 0)
2713 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: returns -1, 0 or 1 depending on
 * the ordering of *a and *b (overflow-safe; never subtracts the values).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Initialize the chain of bitstream filters attached to an output
 * stream: wire each filter's input parameters/timebase to its
 * predecessor's output (the first one takes the stream's codecpar),
 * then copy the last filter's output back onto the stream.
 */
2722 static int init_output_bsfs(OutputStream *ost)
2727 if (!ost->nb_bitstream_filters)
2730 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2731 ctx = ost->bsf_ctx[i];
/* Input of filter i = output of filter i-1 (stream params for i == 0). */
2733 ret = avcodec_parameters_copy(ctx->par_in,
2734 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2738 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2740 ret = av_bsf_init(ctx);
2742 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2743 ost->bsf_ctx[i]->filter->name);
/* Propagate the final filter's parameters/timebase to the stream. */
2748 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2749 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2753 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream for stream copy (no decode/encode): copy the
 * input stream's codec parameters, extradata, side data, framerate and
 * timing info to the output stream, applying per-media-type fixups.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): this excerpt has elided lines; comments describe only
 * the visible code.
 */
2758 static int init_output_stream_streamcopy(OutputStream *ost)
2760 OutputFile *of = output_files[ost->file_index];
2761 InputStream *ist = get_input_stream(ost);
2762 AVCodecParameters *par_dst = ost->st->codecpar;
2763 AVCodecParameters *par_src = ost->ref_par;
2766 uint64_t extra_size;
/* Stream copy requires a mapped input stream and no filtergraph. */
2768 av_assert0(ist && !ost->filter);
/* Round-trip the input parameters through enc_ctx so user -c:?: options
 * (set via encoder_opts) can override individual fields. */
2770 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2771 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2773 av_log(NULL, AV_LOG_FATAL,
2774 "Error setting up codec context options.\n");
2777 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* uint64_t avoids overflow before the INT_MAX sanity check below. */
2779 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2781 if (extra_size > INT_MAX) {
2782 return AVERROR(EINVAL);
2785 /* if stream_copy is selected, no need to decode or encode */
2786 par_dst->codec_id = par_src->codec_id;
2787 par_dst->codec_type = par_src->codec_type;
/* Keep the source codec tag only if the output format can represent it. */
2789 if (!par_dst->codec_tag) {
2790 unsigned int codec_tag;
2791 if (!of->ctx->oformat->codec_tag ||
2792 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2793 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2794 par_dst->codec_tag = par_src->codec_tag;
2797 par_dst->bit_rate = par_src->bit_rate;
2798 par_dst->field_order = par_src->field_order;
2799 par_dst->chroma_location = par_src->chroma_location;
/* Duplicate extradata with the required zeroed padding. */
2801 if (par_src->extradata_size) {
2802 par_dst->extradata = av_mallocz(extra_size);
2803 if (!par_dst->extradata) {
2804 return AVERROR(ENOMEM);
2806 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2807 par_dst->extradata_size = par_src->extradata_size;
2809 par_dst->bits_per_coded_sample  = par_src->bits_per_coded_sample;
2810 par_dst->bits_per_raw_sample    = par_src->bits_per_raw_sample;
2812 if (!ost->frame_rate.num)
2813 ost->frame_rate = ist->framerate;
2814 ost->st->avg_frame_rate = ost->frame_rate;
2816 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2820 // copy timebase while removing common factors
2821 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
/* Copy per-stream side data, skipping the display matrix when the user
 * overrode rotation. */
2823 if (ist->st->nb_side_data) {
2824 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2825 sizeof(*ist->st->side_data));
2826 if (!ost->st->side_data)
2827 return AVERROR(ENOMEM);
2829 ost->st->nb_side_data = 0;
2830 for (i = 0; i < ist->st->nb_side_data; i++) {
2831 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2832 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2834 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2837 sd_dst->data = av_malloc(sd_src->size);
2839 return AVERROR(ENOMEM);
2840 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2841 sd_dst->size = sd_src->size;
2842 sd_dst->type = sd_src->type;
2843 ost->st->nb_side_data++;
/* Parser context used for timestamp fixups during stream copy. */
2847 ost->parser = av_parser_init(par_dst->codec_id);
2848 ost->parser_avctx = avcodec_alloc_context3(NULL);
2849 if (!ost->parser_avctx)
2850 return AVERROR(ENOMEM);
2852 switch (par_dst->codec_type) {
2853 case AVMEDIA_TYPE_AUDIO:
2854 if (audio_volume != 256) {
2855 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2858 par_dst->channel_layout     = par_src->channel_layout;
2859 par_dst->sample_rate        = par_src->sample_rate;
2860 par_dst->channels           = par_src->channels;
2861 par_dst->frame_size         = par_src->frame_size;
2862 par_dst->block_align        = par_src->block_align;
2863 par_dst->initial_padding    = par_src->initial_padding;
2864 par_dst->trailing_padding   = par_src->trailing_padding;
2865 par_dst->profile            = par_src->profile;
/* MP3/AC-3 muxers expect block_align of 0; these values are bogus. */
2866 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2867 par_dst->block_align= 0;
2868 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2869 par_dst->block_align= 0;
2871 case AVMEDIA_TYPE_VIDEO:
2872 par_dst->format             = par_src->format;
2873 par_dst->color_space        = par_src->color_space;
2874 par_dst->color_range        = par_src->color_range;
2875 par_dst->color_primaries    = par_src->color_primaries;
2876 par_dst->color_trc          = par_src->color_trc;
2877 par_dst->width              = par_src->width;
2878 par_dst->height             = par_src->height;
2879 par_dst->video_delay        = par_src->video_delay;
2880 par_dst->profile            = par_src->profile;
/* Sample aspect ratio: -aspect option wins, then input stream SAR,
 * then the source codec parameters. */
2881 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2883 av_mul_q(ost->frame_aspect_ratio,
2884 (AVRational){ par_dst->height, par_dst->width });
2885 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2886 "with stream copy may produce invalid files\n");
2888 else if (ist->st->sample_aspect_ratio.num)
2889 sar = ist->st->sample_aspect_ratio;
2891 sar = par_src->sample_aspect_ratio;
2892 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2893 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2894 ost->st->r_frame_rate = ist->st->r_frame_rate;
2896 case AVMEDIA_TYPE_SUBTITLE:
2897 par_dst->width  = par_src->width;
2898 par_dst->height = par_src->height;
2900 case AVMEDIA_TYPE_UNKNOWN:
2901 case AVMEDIA_TYPE_DATA:
2902 case AVMEDIA_TYPE_ATTACHMENT:
/*
 * Finish initializing one output stream: open its encoder (when
 * encoding) or run the stream-copy setup, copy side data and the
 * timebase onto the AVStream, then initialize any bitstream filters.
 * On failure writes a message into 'error' and returns a negative
 * AVERROR.  NOTE(review): elided lines in this excerpt.
 */
2911 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2915 if (ost->encoding_needed) {
2916 AVCodec      *codec = ost->enc;
2917 AVCodecContext *dec = NULL;
2920 if ((ist = get_input_stream(ost)))
/* Carry the decoder's subtitle header over to the encoder. */
2922 if (dec && dec->subtitle_header) {
2923 /* ASS code assumes this buffer is null terminated so add extra byte. */
2924 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2925 if (!ost->enc_ctx->subtitle_header)
2926 return AVERROR(ENOMEM);
2927 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2928 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2930 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2931 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when none was requested. */
2932 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2934 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2935 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2936 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hand the filtergraph's hardware frames context to the encoder. */
2938 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2939 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2940 if (!ost->enc_ctx->hw_frames_ctx)
2941 return AVERROR(ENOMEM);
2944 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2945 if (ret == AVERROR_EXPERIMENTAL)
2946 abort_codec_experimental(codec, 1);
2947 snprintf(error, error_len,
2948 "Error while opening encoder for output stream #%d:%d - "
2949 "maybe incorrect parameters such as bit_rate, rate, width or height",
2950 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames
 * of exactly enc_ctx->frame_size samples. */
2953 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2954 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2955 av_buffersink_set_frame_size(ost->filter->filter,
2956 ost->enc_ctx->frame_size);
2957 assert_avoptions(ost->encoder_opts);
2958 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2959 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2960 " It takes bits/s as argument, not kbits/s\n");
2962 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
2964 av_log(NULL, AV_LOG_FATAL,
2965 "Error initializing the output stream codec context.\n");
2969 * FIXME: ost->st->codec should't be needed here anymore.
2971 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Move encoder-produced side data onto the stream. */
2975 if (ost->enc_ctx->nb_coded_side_data) {
2978 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2979 sizeof(*ost->st->side_data));
2980 if (!ost->st->side_data)
2981 return AVERROR(ENOMEM);
2983 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2984 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2985 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2987 sd_dst->data = av_malloc(sd_src->size);
2989 return AVERROR(ENOMEM);
2990 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2991 sd_dst->size = sd_src->size;
2992 sd_dst->type = sd_src->type;
2993 ost->st->nb_side_data++;
2997 // copy timebase while removing common factors
2998 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2999 ost->st->codec->codec= ost->enc_ctx->codec;
3000 } else if (ost->stream_copy) {
3001 ret = init_output_stream_streamcopy(ost);
3006 * FIXME: will the codec context used by the parser during streamcopy
3007 * This should go away with the new parser API.
3009 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3014 /* initialize bitstream filters for the output stream
3015 * needs to be done here, because the codec id for streamcopy is not
3016 * known until now */
3017 ret = init_output_bsfs(ost);
/*
 * Parse the -force_key_frames specification 'kf' (a comma-separated list
 * of times, where an entry "chapters[+offset]" expands to all chapter
 * start times) into a sorted array of pts values stored on 'ost'
 * (forced_kf_pts / forced_kf_count), expressed in avctx->time_base.
 * Exits fatally on allocation failure.
 */
3024 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3025 AVCodecContext *avctx)
3028 int n = 1, i, size, index = 0;
/* Count entries: one more than the number of commas. */
3031 for (p = kf; *p; p++)
3035 pts = av_malloc_array(size, sizeof(*pts));
3037 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3042 for (i = 0; i < n; i++) {
3043 char *next = strchr(p, ',');
/* "chapters[+offset]": one keyframe per chapter start, shifted by offset. */
3048 if (!memcmp(p, "chapters", 8)) {
3050 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array; the "chapters" entry is replaced by nb_chapters pts. */
3053 if (avf->nb_chapters > INT_MAX - size ||
3054 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3056 av_log(NULL, AV_LOG_FATAL,
3057 "Could not allocate forced key frames array.\n");
3060 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3061 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3063 for (j = 0; j < avf->nb_chapters; j++) {
3064 AVChapter *c = avf->chapters[j];
3065 av_assert1(index < size);
3066 pts[index++] = av_rescale_q(c->start, c->time_base,
3067 avctx->time_base) + t;
/* Plain time entry. */
3072 t = parse_time_or_die("force_key_frames", p, 1);
3073 av_assert1(index < size);
3074 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3081 av_assert0(index == size);
/* Keep the list sorted so lookups during encoding can scan in order. */
3082 qsort(pts, size, sizeof(*pts), compare_int64);
3083 ost->forced_kf_count = size;
3084 ost->forced_kf_pts   = pts;
/*
 * Warn (once per stream) when a packet arrives for a stream that
 * appeared after the input file was opened; nb_streams_warn tracks the
 * highest stream index already reported.
 */
3087 static void report_new_stream(int input_index, AVPacket *pkt)
3089 InputFile *file = input_files[input_index];
3090 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or it existed from the start). */
3092 if (pkt->stream_index < file->nb_streams_warn)
3094 av_log(file->ctx, AV_LOG_WARNING,
3095 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3096 av_get_media_type_string(st->codecpar->codec_type),
3097 input_index, pkt->stream_index,
3098 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3099 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on an output stream to the libavcodec
 * identification string plus the encoder name, unless the user already
 * supplied one or bitexact mode is requested (then only "Lavc <name>").
 */
3102 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3104 AVDictionaryEntry *e;
3106 uint8_t *encoder_string;
3107 int encoder_string_len;
3108 int format_flags = 0;
3109 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
3111 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
3114 e = av_dict_get(of->opts, "fflags", NULL, 0);
3116 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3119 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3121 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3123 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3126 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: separating space and trailing NUL. */
3129 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3130 encoder_string     = av_mallocz(encoder_string_len);
3131 if (!encoder_string)
3134 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3135 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3137 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3138 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Dictionary takes ownership of encoder_string (DONT_STRDUP_VAL). */
3139 av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3140 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3143 static int transcode_init(void)
3145 int ret = 0, i, j, k;
3146 AVFormatContext *oc;
3149 char error[1024] = {0};
3152 for (i = 0; i < nb_filtergraphs; i++) {
3153 FilterGraph *fg = filtergraphs[i];
3154 for (j = 0; j < fg->nb_outputs; j++) {
3155 OutputFilter *ofilter = fg->outputs[j];
3156 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3158 if (fg->nb_inputs != 1)
3160 for (k = nb_input_streams-1; k >= 0 ; k--)
3161 if (fg->inputs[0]->ist == input_streams[k])
3163 ofilter->ost->source_index = k;
3167 /* init framerate emulation */
3168 for (i = 0; i < nb_input_files; i++) {
3169 InputFile *ifile = input_files[i];
3170 if (ifile->rate_emu)
3171 for (j = 0; j < ifile->nb_streams; j++)
3172 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3175 /* for each output stream, we compute the right encoding parameters */
3176 for (i = 0; i < nb_output_streams; i++) {
3177 ost = output_streams[i];
3178 oc = output_files[ost->file_index]->ctx;
3179 ist = get_input_stream(ost);
3181 if (ost->attachment_filename)
3185 ost->st->disposition = ist->st->disposition;
3187 for (j=0; j<oc->nb_streams; j++) {
3188 AVStream *st = oc->streams[j];
3189 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3192 if (j == oc->nb_streams)
3193 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3194 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3195 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3198 if (!ost->stream_copy) {
3199 AVCodecContext *enc_ctx = ost->enc_ctx;
3200 AVCodecContext *dec_ctx = NULL;
3202 set_encoder_id(output_files[ost->file_index], ost);
3205 dec_ctx = ist->dec_ctx;
3207 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3211 if (qsv_transcode_init(ost))
3216 if (cuvid_transcode_init(ost))
3220 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3221 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3222 filtergraph_is_simple(ost->filter->graph)) {
3223 FilterGraph *fg = ost->filter->graph;
3224 if (configure_filtergraph(fg)) {
3225 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3230 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3231 if (!ost->frame_rate.num)
3232 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3233 if (ist && !ost->frame_rate.num)
3234 ost->frame_rate = ist->framerate;
3235 if (ist && !ost->frame_rate.num)
3236 ost->frame_rate = ist->st->r_frame_rate;
3237 if (ist && !ost->frame_rate.num) {
3238 ost->frame_rate = (AVRational){25, 1};
3239 av_log(NULL, AV_LOG_WARNING,
3241 "about the input framerate is available. Falling "
3242 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3243 "if you want a different framerate.\n",
3244 ost->file_index, ost->index);
3246 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3247 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3248 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3249 ost->frame_rate = ost->enc->supported_framerates[idx];
3251 // reduce frame rate for mpeg4 to be within the spec limits
3252 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3253 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3254 ost->frame_rate.num, ost->frame_rate.den, 65535);
3258 switch (enc_ctx->codec_type) {
3259 case AVMEDIA_TYPE_AUDIO:
3260 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3262 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3263 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3264 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3265 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3266 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3267 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3269 case AVMEDIA_TYPE_VIDEO:
3270 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3271 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3272 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3273 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3274 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3275 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3276 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3278 for (j = 0; j < ost->forced_kf_count; j++)
3279 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3281 enc_ctx->time_base);
3283 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3284 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3285 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3286 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3287 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3288 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3289 if (!strncmp(ost->enc->name, "libx264", 7) &&
3290 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3291 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3292 av_log(NULL, AV_LOG_WARNING,
3293 "No pixel format specified, %s for H.264 encoding chosen.\n"
3294 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3295 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3296 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3297 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3298 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3299 av_log(NULL, AV_LOG_WARNING,
3300 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3301 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3302 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3303 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3305 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3306 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3308 ost->st->avg_frame_rate = ost->frame_rate;
3311 enc_ctx->width != dec_ctx->width ||
3312 enc_ctx->height != dec_ctx->height ||
3313 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3314 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3317 if (ost->forced_keyframes) {
3318 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3319 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3320 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3322 av_log(NULL, AV_LOG_ERROR,
3323 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3326 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3327 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3328 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3329 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3331 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3332 // parse it only for static kf timings
3333 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3334 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3338 case AVMEDIA_TYPE_SUBTITLE:
3339 enc_ctx->time_base = (AVRational){1, 1000};
3340 if (!enc_ctx->width) {
3341 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3342 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3345 case AVMEDIA_TYPE_DATA:
3353 if (ost->disposition) {
3354 static const AVOption opts[] = {
3355 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3356 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3357 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3358 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3359 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3360 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3361 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3362 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3363 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3364 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3365 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3366 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3367 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3368 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3371 static const AVClass class = {
3373 .item_name = av_default_item_name,
3375 .version = LIBAVUTIL_VERSION_INT,
3377 const AVClass *pclass = &class;
3379 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3385 /* init input streams */
3386 for (i = 0; i < nb_input_streams; i++)
3387 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3388 for (i = 0; i < nb_output_streams; i++) {
3389 ost = output_streams[i];
3390 avcodec_close(ost->enc_ctx);
3395 /* open each encoder */
3396 for (i = 0; i < nb_output_streams; i++) {
3397 ret = init_output_stream(output_streams[i], error, sizeof(error));
3402 /* discard unused programs */
3403 for (i = 0; i < nb_input_files; i++) {
3404 InputFile *ifile = input_files[i];
3405 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3406 AVProgram *p = ifile->ctx->programs[j];
3407 int discard = AVDISCARD_ALL;
3409 for (k = 0; k < p->nb_stream_indexes; k++)
3410 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3411 discard = AVDISCARD_DEFAULT;
3414 p->discard = discard;
3418 /* open files and write file headers */
3419 for (i = 0; i < nb_output_files; i++) {
3420 oc = output_files[i]->ctx;
3421 oc->interrupt_callback = int_cb;
3422 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3423 snprintf(error, sizeof(error),
3424 "Could not write header for output file #%d "
3425 "(incorrect codec parameters ?): %s",
3426 i, av_err2str(ret));
3427 ret = AVERROR(EINVAL);
3430 // assert_avoptions(output_files[i]->opts);
3431 if (strcmp(oc->oformat->name, "rtp")) {
3437 /* dump the file output parameters - cannot be done before in case
3439 for (i = 0; i < nb_output_files; i++) {
3440 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3443 /* dump the stream mapping */
3444 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3445 for (i = 0; i < nb_input_streams; i++) {
3446 ist = input_streams[i];
3448 for (j = 0; j < ist->nb_filters; j++) {
3449 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3450 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3451 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3452 ist->filters[j]->name);
3453 if (nb_filtergraphs > 1)
3454 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3455 av_log(NULL, AV_LOG_INFO, "\n");
3460 for (i = 0; i < nb_output_streams; i++) {
3461 ost = output_streams[i];
3463 if (ost->attachment_filename) {
3464 /* an attached file */
3465 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3466 ost->attachment_filename, ost->file_index, ost->index);
3470 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3471 /* output from a complex graph */
3472 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3473 if (nb_filtergraphs > 1)
3474 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3476 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3477 ost->index, ost->enc ? ost->enc->name : "?");
3481 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3482 input_streams[ost->source_index]->file_index,
3483 input_streams[ost->source_index]->st->index,
3486 if (ost->sync_ist != input_streams[ost->source_index])
3487 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3488 ost->sync_ist->file_index,
3489 ost->sync_ist->st->index);
3490 if (ost->stream_copy)
3491 av_log(NULL, AV_LOG_INFO, " (copy)");
3493 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3494 const AVCodec *out_codec = ost->enc;
3495 const char *decoder_name = "?";
3496 const char *in_codec_name = "?";
3497 const char *encoder_name = "?";
3498 const char *out_codec_name = "?";
3499 const AVCodecDescriptor *desc;
3502 decoder_name = in_codec->name;
3503 desc = avcodec_descriptor_get(in_codec->id);
3505 in_codec_name = desc->name;
3506 if (!strcmp(decoder_name, in_codec_name))
3507 decoder_name = "native";
3511 encoder_name = out_codec->name;
3512 desc = avcodec_descriptor_get(out_codec->id);
3514 out_codec_name = desc->name;
3515 if (!strcmp(encoder_name, out_codec_name))
3516 encoder_name = "native";
3519 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3520 in_codec_name, decoder_name,
3521 out_codec_name, encoder_name);
3523 av_log(NULL, AV_LOG_INFO, "\n");
3527 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3531 if (sdp_filename || want_sdp) {
3535 transcode_init_done = 1;
/* NOTE(review): this excerpt is sampled — some source lines (declarations,
 * braces, returns) between the numbered lines are elided; code left as-is. */
3540 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3541 static int need_output(void)
3545 for (i = 0; i < nb_output_streams; i++) {
3546 OutputStream *ost = output_streams[i];
/* of/os alias the output file that owns this stream */
3547 OutputFile *of = output_files[ost->file_index];
3548 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* stream needs no more output once finished, or once its file hit the
 * -fs (limit_filesize) byte limit as measured by avio_tell() */
3550 if (ost->finished ||
3551 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream belonging to this output file */
3553 if (ost->frame_number >= ost->max_frames) {
3555 for (j = 0; j < of->ctx->nb_streams; j++)
3556 close_output_stream(output_streams[of->ost_index + j]);
/* NOTE(review): sampled excerpt — intermediate lines elided; code left as-is. */
3567 * Select the output stream to process.
3569 * @return selected output stream, or NULL if none available
3571 static OutputStream *choose_output(void)
3574 int64_t opts_min = INT64_MAX;
3575 OutputStream *ost_min = NULL;
3577 for (i = 0; i < nb_output_streams; i++) {
3578 OutputStream *ost = output_streams[i];
/* rescale the stream's current dts for comparison; a stream with no dts
 * yet sorts first (INT64_MIN) so it gets fed before the others */
3579 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3580 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3582 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3583 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* pick the unfinished stream that is furthest behind; an "unavailable"
 * stream (demuxer returned EAGAIN) yields NULL instead */
3585 if (!ost->finished && opts < opts_min) {
3587 ost_min = ost->unavailable ? NULL : ost;
/* Enable/disable terminal echo on stdin (fd 0) via termios, used while
 * reading interactive commands. NOTE(review): sampled excerpt — the
 * variable declaration and closing braces are elided here. */
3593 static void set_tty_echo(int on)
/* silently does nothing if stdin is not a tty (tcgetattr fails) */
3597 if (tcgetattr(0, &tty) == 0) {
3598 if (on) tty.c_lflag |= ECHO;
3599 else tty.c_lflag &= ~ECHO;
3600 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for single-key interactive commands ('q' quit, '+/-' verbosity,
 * 'c'/'C' filter commands, 'd'/'D' debug, '?' help, ...).
 * Returns AVERROR_EXIT to request termination, otherwise continues.
 * NOTE(review): sampled excerpt — intermediate lines elided; code left as-is. */
3605 static int check_keyboard_interaction(int64_t cur_time)
3608 static int64_t last_time;
3609 if (received_nb_signals)
3610 return AVERROR_EXIT;
3611 /* read_key() returns 0 on EOF */
/* rate-limit key polling to once per 100ms; disabled when daemonized */
3612 if(cur_time - last_time >= 100000 && !run_as_daemon){
3614 last_time = cur_time;
3618 return AVERROR_EXIT;
3619 if (key == '+') av_log_set_level(av_log_get_level()+10);
3620 if (key == '-') av_log_set_level(av_log_get_level()-10);
3621 if (key == 's') qp_hist ^= 1;
3624 do_hex_dump = do_pkt_dump = 0;
3625 } else if(do_pkt_dump){
3629 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' to all of them */
3631 if (key == 'c' || key == 'C'){
3632 char buf[4096], target[64], command[256], arg[256] = {0};
3635 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* read a whole line key-by-key (echo handling is around this, elided) */
3638 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3643 fprintf(stderr, "\n");
3645 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3646 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3647 target, time, command, arg);
3648 for (i = 0; i < nb_filtergraphs; i++) {
3649 FilterGraph *fg = filtergraphs[i];
3652 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3653 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3654 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3655 } else if (key == 'c') {
3656 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3657 ret = AVERROR_PATCHWELCOME;
3659 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3661 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3666 av_log(NULL, AV_LOG_ERROR,
3667 "Parse error, at least 3 arguments were expected, "
3668 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles debug modes, 'D' prompts for an explicit debug value */
3671 if (key == 'd' || key == 'D'){
3674 debug = input_streams[0]->st->codec->debug<<1;
3675 if(!debug) debug = 1;
3676 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3683 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3688 fprintf(stderr, "\n");
3689 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3690 fprintf(stderr,"error parsing debug value\n");
/* propagate the new debug flags to every decoder and encoder context */
3692 for(i=0;i<nb_input_streams;i++) {
3693 input_streams[i]->st->codec->debug = debug;
3695 for(i=0;i<nb_output_streams;i++) {
3696 OutputStream *ost = output_streams[i];
3697 ost->enc_ctx->debug = debug;
3699 if(debug) av_log_set_level(AV_LOG_DEBUG);
3700 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key, presumably — confirm) prints the key help table */
3703 fprintf(stderr, "key function\n"
3704 "? show this help\n"
3705 "+ increase verbosity\n"
3706 "- decrease verbosity\n"
3707 "c Send command to first matching filter supporting it\n"
3708 "C Send/Que command to all matching filters\n"
3709 "D cycle through available debug modes\n"
3710 "h dump packets/hex press to cycle through the 3 states\n"
3712 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through a thread message queue.
 * NOTE(review): sampled excerpt — loop braces and some lines elided. */
3719 static void *input_thread(void *arg)
/* non-blocking sends for files flagged non_blocking (see init_input_threads) */
3722 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3727 ret = av_read_frame(f->ctx, &pkt);
3729 if (ret == AVERROR(EAGAIN)) {
/* on demuxer error/EOF, propagate the error code to the receiving side */
3734 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3737 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full on a non-blocking send: retry blocking and warn the user to
 * raise -thread_queue_size */
3738 if (flags && ret == AVERROR(EAGAIN)) {
3740 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3741 av_log(f->ctx, AV_LOG_WARNING,
3742 "Thread message queue blocking; consider raising the "
3743 "thread_queue_size option (current value: %d)\n",
3744 f->thread_queue_size);
3747 if (ret != AVERROR_EOF)
3748 av_log(f->ctx, AV_LOG_ERROR,
3749 "Unable to send packet to main thread: %s\n",
/* send failed: the packet was not consumed, so unref it here */
3751 av_packet_unref(&pkt);
3752 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all demuxer threads, draining and freeing their queues.
 * NOTE(review): sampled excerpt — some lines elided; code left as-is. */
3760 static void free_input_threads(void)
3764 for (i = 0; i < nb_input_files; i++) {
3765 InputFile *f = input_files[i];
3768 if (!f || !f->in_thread_queue)
/* tell the producer thread to stop, then drain queued packets so the
 * thread is not blocked on a full queue before joining it */
3770 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3771 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3772 av_packet_unref(&pkt);
3774 pthread_join(f->thread, NULL);
3776 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one demuxer thread per input file (only when there are multiple
 * inputs). Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): sampled excerpt — some lines elided; code left as-is. */
3780 static int init_input_threads(void)
/* a single input file is read directly on the main thread — no threads */
3784 if (nb_input_files == 1)
3787 for (i = 0; i < nb_input_files; i++) {
3788 InputFile *f = input_files[i];
/* non-seekable real-time-ish sources (except lavfi) get non-blocking
 * queue sends so a slow consumer does not stall the demuxer */
3790 if (f->ctx->pb ? !f->ctx->pb->seekable :
3791 strcmp(f->ctx->iformat->name, "lavfi"))
3792 f->non_blocking = 1;
3793 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3794 f->thread_queue_size, sizeof(AVPacket));
3798 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3799 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3800 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, hence AVERROR(ret) */
3801 return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue; the
 * blocking/non-blocking choice (condition elided here) selects the
 * AV_THREAD_MESSAGE_NONBLOCK flag. */
3807 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3809 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3811 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file, honoring -re style rate
 * limiting (elided condition) and delegating to the threaded reader when
 * several inputs exist. NOTE(review): sampled excerpt — lines elided. */
3815 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* rate limiting: do not hand out a packet whose dts lies in the future
 * relative to wall-clock time since the stream started */
3819 for (i = 0; i < f->nb_streams; i++) {
3820 InputStream *ist = input_streams[f->ist_index + i];
3821 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3822 int64_t now = av_gettime_relative() - ist->start;
3824 return AVERROR(EAGAIN);
3829 if (nb_input_files > 1)
3830 return get_input_packet_mt(f, pkt);
3832 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input returned EAGAIN this iteration). */
3835 static int got_eagain(void)
3838 for (i = 0; i < nb_output_streams; i++)
3839 if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-stream unavailable flags before the
 * next scheduling round. */
3844 static void reset_eagain(void)
3847 for (i = 0; i < nb_input_files; i++)
3848 input_files[i]->eagain = 0;
3849 for (i = 0; i < nb_output_streams; i++)
3850 output_streams[i]->unavailable = 0;
/* NOTE(review): sampled excerpt — early-return branch bodies elided. */
3853 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3854 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3855 AVRational time_base)
3861 return tmp_time_base;
/* av_compare_ts() compares the two durations across differing time bases */
3864 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3867 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop, flushing decoders
 * and updating ifile->duration/time_base with the length consumed so far
 * so looped timestamps stay monotonic.
 * NOTE(review): sampled excerpt — some lines elided; code left as-is. */
3873 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3876 AVCodecContext *avctx;
3877 int i, ret, has_audio = 0;
3878 int64_t duration = 0;
3880 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush decoders and detect whether usable audio exists */
3884 for (i = 0; i < ifile->nb_streams; i++) {
3885 ist = input_streams[ifile->ist_index + i];
3886 avctx = ist->dec_ctx;
3889 if (ist->decoding_needed) {
3890 process_input_packet(ist, NULL, 1);
3891 avcodec_flush_buffers(avctx);
3894 /* duration is the length of the last frame in a stream
3895 * when audio stream is present we don't care about
3896 * last video frame length because it's not defined exactly */
3897 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute each stream's total duration */
3901 for (i = 0; i < ifile->nb_streams; i++) {
3902 ist = input_streams[ifile->ist_index + i];
3903 avctx = ist->dec_ctx;
/* audio: last-frame length from the decoded sample count */
3906 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3907 AVRational sample_rate = {1, avctx->sample_rate};
3909 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: approximate last-frame length from frame rate, else 1 tick */
3913 if (ist->framerate.num) {
3914 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3915 } else if (ist->st->avg_frame_rate.num) {
3916 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3917 } else duration = 1;
3919 if (!ifile->duration)
3920 ifile->time_base = ist->st->time_base;
3921 /* the total duration of the stream, max_pts - min_pts is
3922 * the duration of the stream without the last frame */
3923 duration += ist->max_pts - ist->min_pts;
3924 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* decrement the remaining loop count when it is finite (>0) */
3928 if (ifile->loop > 0)
/* Read and forward one packet from input file file_index.
 * NOTE(review): sampled excerpt — many lines (declarations, braces, some
 * branches) are elided; all visible code left byte-identical. The
 * timestamp-discontinuity logic below is order-sensitive; do not reorder. */
3936 * - 0 -- one packet was read and processed
3937 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3938 * this function should be called again
3939 * - AVERROR_EOF -- this function should not be called again
3941 static int process_input(int file_index)
3943 InputFile *ifile = input_files[file_index];
3944 AVFormatContext *is;
3952 ret = get_input_packet(ifile, &pkt);
3954 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on EOF (or error) rewind and try reading again */
3958 if (ret < 0 && ifile->loop) {
3959 if ((ret = seek_to_start(ifile, is)) < 0)
3961 ret = get_input_packet(ifile, &pkt);
3962 if (ret == AVERROR(EAGAIN)) {
3968 if (ret != AVERROR_EOF) {
3969 print_error(is->filename, ret);
/* EOF handling: flush decoders and finish non-filtered outputs */
3974 for (i = 0; i < ifile->nb_streams; i++) {
3975 ist = input_streams[ifile->ist_index + i];
3976 if (ist->decoding_needed) {
3977 ret = process_input_packet(ist, NULL, 0);
3982 /* mark all outputs that don't go through lavfi as finished */
3983 for (j = 0; j < nb_output_streams; j++) {
3984 OutputStream *ost = output_streams[j];
3986 if (ost->source_index == ifile->ist_index + i &&
3987 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3988 finish_output_stream(ost);
/* report EAGAIN so the main loop keeps draining other inputs/filters */
3992 ifile->eof_reached = 1;
3993 return AVERROR(EAGAIN);
3999 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4000 is->streams[pkt.stream_index]);
4002 /* the following test is needed in case new streams appear
4003 dynamically in stream : we ignore them */
4004 if (pkt.stream_index >= ifile->nb_streams) {
4005 report_new_stream(file_index, &pkt);
4006 goto discard_packet;
4009 ist = input_streams[ifile->ist_index + pkt.stream_index];
4011 ist->data_size += pkt.size;
4015 goto discard_packet;
4017 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4018 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* verbose demuxer trace (enabled by debug_ts, presumably — guard elided) */
4023 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4024 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4025 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4026 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4027 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4028 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4029 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4030 av_ts2str(input_files[ist->file_index]->ts_offset),
4031 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for containers with <64-bit pts wrap */
4034 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4035 int64_t stime, stime2;
4036 // Correcting starttime based on the enabled streams
4037 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4038 // so we instead do it here as part of discontinuity handling
4039 if ( ist->next_dts == AV_NOPTS_VALUE
4040 && ifile->ts_offset == -is->start_time
4041 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4042 int64_t new_start_time = INT64_MAX;
4043 for (i=0; i<is->nb_streams; i++) {
4044 AVStream *st = is->streams[i];
4045 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4047 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4049 if (new_start_time > is->start_time) {
4050 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4051 ifile->ts_offset = -new_start_time;
4055 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4056 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4057 ist->wrap_correction_done = 1;
/* if the timestamp is past the wrap midpoint, unwrap it and keep trying */
4059 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4060 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4061 ist->wrap_correction_done = 0;
4063 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4064 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4065 ist->wrap_correction_done = 0;
4069 /* add the stream-global side data to the first packet */
4070 if (ist->nb_packets == 1) {
4071 if (ist->st->nb_side_data)
4072 av_packet_split_side_data(&pkt);
4073 for (i = 0; i < ist->st->nb_side_data; i++) {
4074 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* don't duplicate side data the packet already carries */
4077 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* display matrix is consumed by autorotate instead of being copied */
4079 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4082 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4086 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply per-file timestamp offset, then per-stream -itsscale factor */
4090 if (pkt.dts != AV_NOPTS_VALUE)
4091 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4092 if (pkt.pts != AV_NOPTS_VALUE)
4093 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4095 if (pkt.pts != AV_NOPTS_VALUE)
4096 pkt.pts *= ist->ts_scale;
4097 if (pkt.dts != AV_NOPTS_VALUE)
4098 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen ts */
4100 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4101 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4102 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4103 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4104 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4105 int64_t delta = pkt_dts - ifile->last_ts;
4106 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4107 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4108 ifile->ts_offset -= delta;
4109 av_log(NULL, AV_LOG_DEBUG,
4110 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4111 delta, ifile->ts_offset);
4112 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4113 if (pkt.pts != AV_NOPTS_VALUE)
4114 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated loop duration */
4118 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4119 if (pkt.pts != AV_NOPTS_VALUE) {
4120 pkt.pts += duration;
4121 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4122 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4125 if (pkt.dts != AV_NOPTS_VALUE)
4126 pkt.dts += duration;
/* intra-stream discontinuity check against the predicted next_dts */
4128 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4129 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4130 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4131 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4133 int64_t delta = pkt_dts - ist->next_dts;
4134 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4135 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4136 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4137 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4138 ifile->ts_offset -= delta;
4139 av_log(NULL, AV_LOG_DEBUG,
4140 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4141 delta, ifile->ts_offset);
4142 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4143 if (pkt.pts != AV_NOPTS_VALUE)
4144 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop wildly invalid dts/pts instead of shifting */
4147 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4148 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4149 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4150 pkt.dts = AV_NOPTS_VALUE;
4152 if (pkt.pts != AV_NOPTS_VALUE){
4153 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4154 delta = pkt_pts - ist->next_dts;
4155 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4156 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4157 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4158 pkt.pts = AV_NOPTS_VALUE;
4164 if (pkt.dts != AV_NOPTS_VALUE)
4165 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4168 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4169 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4170 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4171 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4172 av_ts2str(input_files[ist->file_index]->ts_offset),
4173 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4176 sub2video_heartbeat(ist, pkt.pts);
4178 process_input_packet(ist, &pkt, 0);
4181 av_packet_unref(&pkt);
/* NOTE(review): sampled excerpt — some lines elided; code left as-is. */
4187 * Perform a step of transcoding for the specified filter graph.
4189 * @param[in] graph filter graph to consider
4190 * @param[out] best_ist input stream where a frame would allow to continue
4191 * @return 0 for success, <0 for error
4193 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4196 int nb_requests, nb_requests_max = 0;
4197 InputFilter *ifilter;
4201 ret = avfilter_graph_request_oldest(graph->graph);
/* a frame was produced (ret >= 0, presumably — guard elided): reap it */
4203 return reap_filters(0);
/* graph fully drained: flush outputs and close the graph's streams */
4205 if (ret == AVERROR_EOF) {
4206 ret = reap_filters(1);
4207 for (i = 0; i < graph->nb_outputs; i++)
4208 close_output_stream(graph->outputs[i]->ost);
4211 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc starved the most as best_ist */
4214 for (i = 0; i < graph->nb_inputs; i++) {
4215 ifilter = graph->inputs[i];
4217 if (input_files[ist->file_index]->eagain ||
4218 input_files[ist->file_index]->eof_reached)
4220 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4221 if (nb_requests > nb_requests_max) {
4222 nb_requests_max = nb_requests;
/* no feedable input found: mark the graph's outputs unavailable */
4228 for (i = 0; i < graph->nb_outputs; i++)
4229 graph->outputs[i]->ost->unavailable = 1;
/* NOTE(review): sampled excerpt — some lines elided; code left as-is. */
4235 * Run a single step of transcoding.
4237 * @return 0 for success, <0 for error
4239 static int transcode_step(void)
4245 ost = choose_output();
/* no selectable output: either everything is drained (finish) or all
 * inputs returned EAGAIN (branch structure elided here) */
4252 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* complex filter graph output: ask the graph which input to feed */
4257 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4262 av_assert0(ost->source_index >= 0);
4263 ist = input_streams[ost->source_index];
4266 ret = process_input(ist->file_index);
4267 if (ret == AVERROR(EAGAIN)) {
4268 if (input_files[ist->file_index]->eagain)
4269 ost->unavailable = 1;
/* EOF on input is not an error for the step as a whole */
4274 return ret == AVERROR_EOF ? 0 : ret;
4276 return reap_filters(0);
/* NOTE(review): sampled excerpt — many lines (declarations, braces,
 * error-path labels) elided; all visible code left byte-identical. */
4280 * The following code is the main loop of the file converter
4282 static int transcode(void)
4285 AVFormatContext *os;
4288 int64_t timer_start;
4289 int64_t total_packets_written = 0;
4291 ret = transcode_init();
4295 if (stdin_interaction) {
4296 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4299 timer_start = av_gettime_relative();
4302 if ((ret = init_input_threads()) < 0)
/* main loop: one transcode_step() per iteration until a signal or EOF */
4306 while (!received_sigterm) {
4307 int64_t cur_time= av_gettime_relative();
4309 /* if 'q' pressed, exits */
4310 if (stdin_interaction)
4311 if (check_keyboard_interaction(cur_time) < 0)
4314 /* check if there's any stream where output is still needed */
4315 if (!need_output()) {
4316 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4320 ret = transcode_step();
4321 if (ret < 0 && ret != AVERROR_EOF) {
4323 av_strerror(ret, errbuf, sizeof(errbuf));
4325 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4329 /* dump report by using the output first video and audio streams */
4330 print_report(0, timer_start, cur_time);
4333 free_input_threads();
4336 /* at the end of stream, we must flush the decoder buffers */
4337 for (i = 0; i < nb_input_streams; i++) {
4338 ist = input_streams[i];
4339 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4340 process_input_packet(ist, NULL, 0);
4347 /* write the trailer if needed and close file */
4348 for (i = 0; i < nb_output_files; i++) {
4349 os = output_files[i]->ctx;
4350 if ((ret = av_write_trailer(os)) < 0) {
4351 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4357 /* dump report by using the first video and audio streams */
4358 print_report(1, timer_start, av_gettime_relative());
4360 /* close each encoder */
4361 for (i = 0; i < nb_output_streams; i++) {
4362 ost = output_streams[i];
4363 if (ost->encoding_needed) {
4364 av_freep(&ost->enc_ctx->stats_in);
4366 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever written */
4369 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4370 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4374 /* close each decoder */
4375 for (i = 0; i < nb_input_streams; i++) {
4376 ist = input_streams[i];
4377 if (ist->decoding_needed) {
4378 avcodec_close(ist->dec_ctx);
4379 if (ist->hwaccel_uninit)
4380 ist->hwaccel_uninit(ist->dec_ctx);
4384 av_buffer_unref(&hw_device_ctx);
/* cleanup path (reached from error labels as well, presumably — labels
 * elided in this excerpt) */
4391 free_input_threads();
4394 if (output_streams) {
4395 for (i = 0; i < nb_output_streams; i++) {
4396 ost = output_streams[i];
4399 if (fclose(ost->logfile))
4400 av_log(NULL, AV_LOG_ERROR,
4401 "Error closing logfile, loss of information possible: %s\n",
4402 av_err2str(AVERROR(errno)));
4403 ost->logfile = NULL;
4405 av_freep(&ost->forced_kf_pts);
4406 av_freep(&ost->apad);
4407 av_freep(&ost->disposition);
4408 av_dict_free(&ost->encoder_opts);
4409 av_dict_free(&ost->sws_dict);
4410 av_dict_free(&ost->swr_opts);
4411 av_dict_free(&ost->resample_opts);
/* Return the process's consumed user CPU time in microseconds, via
 * getrusage (POSIX), GetProcessTimes (Windows), or wall clock fallback.
 * NOTE(review): the opening #if line is elided in this excerpt. */
4419 static int64_t getutime(void)
4422 struct rusage rusage;
4424 getrusage(RUSAGE_SELF, &rusage);
4425 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4426 #elif HAVE_GETPROCESSTIMES
4428 FILETIME c, e, k, u;
4429 proc = GetCurrentProcess();
4430 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds */
4431 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4433 return av_gettime_relative();
/* Return the process's peak memory usage in bytes, or a platform-specific
 * approximation (peak pagefile usage on Windows). */
4437 static int64_t getmaxrss(void)
4439 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4440 struct rusage rusage;
4441 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes on Linux/BSD */
4442 return (int64_t)rusage.ru_maxrss * 1024;
4443 #elif HAVE_GETPROCESSMEMORYINFO
4445 PROCESS_MEMORY_COUNTERS memcounters;
4446 proc = GetCurrentProcess();
4447 memcounters.cb = sizeof(memcounters);
4448 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4449 return memcounters.PeakPagefileUsage;
/* No-op av_log callback installed by the hidden -d (daemon) option to
 * silence all logging (body elided in this excerpt). */
4455 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register components, parse the command line, run
 * transcode(), print benchmarks, and exit via exit_program().
 * NOTE(review): sampled excerpt — some lines elided; code left as-is. */
4459 int main(int argc, char **argv)
4466 register_exit(ffmpeg_cleanup);
4468 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4470 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4471 parse_loglevel(argc, argv, options);
/* hidden "-d" first-argument switch: daemonize and mute logging */
4473 if(argc>1 && !strcmp(argv[1], "-d")){
4475 av_log_set_callback(log_callback_null);
/* register all codecs/devices/filters/(de)muxers and network support */
4480 avcodec_register_all();
4482 avdevice_register_all();
4484 avfilter_register_all();
4486 avformat_network_init();
4488 show_banner(argc, argv, options);
4490 /* parse options and open all input/output files */
4491 ret = ffmpeg_parse_options(argc, argv);
4495 if (nb_output_files <= 0 && nb_input_files == 0) {
4497 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4501 /* file converter / grab */
4502 if (nb_output_files <= 0) {
4503 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4507 // if (nb_input_files == 0) {
4508 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* measure user CPU time around the whole conversion for -benchmark */
4512 current_time = ti = getutime();
4513 if (transcode() < 0)
4515 ti = getutime() - ti;
4517 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4519 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4520 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail if the decode error ratio exceeds the limit */
4521 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals "terminated by user" to the caller */
4524 exit_program(received_nb_signals ? 255 : main_return_code);
4525 return main_return_code;