2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by the shared cmdutils banner/version code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log handle for the -vstats option; opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions.
 * NOTE(review): the initializer list is elided in this dump. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Running totals of duplicated/dropped frames (see do_video_out()). */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles, allocated in do_subtitle_out(). */
135 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static, so presumably shared with the
 * option-parsing translation unit — confirm against the full source. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty is set. */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame, sized from the
 * decoder dimensions with the configured sub2video size as fallback, and
 * zero it (fully transparent). Returns <0 on allocation failure.
 * NOTE(review): interior lines (ret declaration, early return, braces) are
 * elided in this dump. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* Buffer requested with 32-byte alignment. */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the whole canvas; assumes data[0] covers height * linesize bytes. */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto a w x h RGB32 canvas,
 * expanding each palette index through the rectangle's palette (data[1]).
 * Rectangles outside the canvas, or non-bitmap rectangles, are rejected
 * with a warning. NOTE(review): locals (x, y, src2) and early returns are
 * elided in this dump. */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the rectangle must fit entirely inside the canvas. */
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
199 dst += r->y * dst_linesize + r->x * 4;
201 pal = (uint32_t *)r->data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
/* Palette lookup: 8-bit index -> 32-bit RGBA. */
206 *(dst2++) = pal[*(src2++)];
208 src += r->linesize[0];
/* Stamp the sub2video canvas with the given pts and push a kept reference
 * of it into every filter fed by this input stream. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the frame is reused for later heartbeats; PUSH: process now. */
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto the stream's sub2video canvas and push it to the
 * filters. With sub == NULL (elided branch) a blank "clear" frame is pushed
 * instead. Display times are rescaled from ms-based AV_TIME_BASE to the
 * stream time base. NOTE(review): the NULL-sub else-branch and several
 * locals are elided in this dump. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* start/end_display_time are in ms; *1000 converts to AV_TIME_BASE units. */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* Elided branch: without a subtitle, reuse the previous end pts. */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame to all sub2video streams of the same
 * input file so video frames do not pile up in filters (e.g. overlay)
 * waiting for subtitle input. NOTE(review): loop braces and some
 * locals (pts2, j, nb_reqs) are elided in this dump. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Refresh the canvas when the current subtitle has expired or none exists. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
/* Presumably only pushed when filters actually requested input (nb_reqs)
 * — the guarding condition is elided; confirm against the full source. */
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video stream: push a final clearing
 * frame if a subtitle is still displayed, then send NULL (EOF) to every
 * attached buffer source. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the saved tty attributes.
 * NOTE(review): the restore_tty guard around tcsetattr is elided in this
 * dump; confirm against the full source. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
/* Flush av_log's internal state without printing anything. */
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal-handler communication flags. Written from signal context, so they
 * are volatile; note volatile alone is not a memory barrier. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
/* Set once initialization finished; read by decode_interrupt_cb(). */
316 static volatile int transcode_init_done = 0;
/* Set when the program is about to exit; polled by the Windows CtrlHandler. */
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: records which signal arrived and how many times; after
 * more than three signals, writes a message with async-signal-safe write()
 * and (in elided code) hard-exits. */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
/* write(2) is async-signal-safe, unlike fprintf. */
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events to the same POSIX
 * signal path used on Unix. NOTE(review): the switch statement opening,
 * the CTRL_C_EVENT case and return statements are elided in this dump. */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): this fragment belongs to terminal/signal initialization
 * (presumably term_init(); its signature is elided in this dump). It puts
 * the controlling tty into a raw-ish mode for interactive key handling and
 * installs the signal handlers. */
369 if (!run_as_daemon && stdin_interaction) {
371 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so single keystrokes arrive raw. */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded also triggers a clean shutdown. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking. Unix path uses
 * select() with (elided) zero timeout; Windows path peeks the console or
 * pipe. NOTE(review): most of both platform branches is elided in this
 * dump. */
401 static int read_key(void)
/* fd 0 (stdin) only; tv presumably zeroed for a non-blocking poll. */
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, distinguishing console vs pipe input. */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* libavformat interrupt callback: abort blocking I/O once a signal has
 * arrived (before init, the first signal aborts; after init,
 * transcode_init_done == 1 so a single signal allows a graceful finish). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
/* Shared interrupt callback handed to avformat_open_input() and friends. */
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: frees filter graphs, output files/
 * streams, input files/streams, closes the vstats file and the network
 * layer, then reports the exit cause. `ret` is the pending exit code.
 * NOTE(review): many guard conditions and braces are elided in this dump. */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filter graphs --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- output files --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
/* Close the file only for formats that actually opened one. */
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- output streams --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
509 for (j = 0; j < ost->nb_bitstream_filters; j++)
510 av_bsf_free(&ost->bsf_ctx[j]);
511 av_freep(&ost->bsf_ctx);
512 av_freep(&ost->bsf_extradata_updated);
514 av_frame_free(&ost->filtered_frame);
515 av_frame_free(&ost->last_frame);
516 av_dict_free(&ost->encoder_opts);
518 av_parser_close(ost->parser);
519 avcodec_free_context(&ost->parser_avctx);
521 av_freep(&ost->forced_keyframes);
522 av_expr_free(ost->forced_keyframes_pexpr);
523 av_freep(&ost->avfilter);
524 av_freep(&ost->logfile_prefix);
526 av_freep(&ost->audio_channels_map);
527 ost->audio_channels_mapped = 0;
529 av_dict_free(&ost->sws_dict);
531 avcodec_free_context(&ost->enc_ctx);
532 avcodec_parameters_free(&ost->ref_par);
534 av_freep(&output_streams[i]);
/* Input reader threads must stop before their files are closed. */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
553 av_freep(&ist->dts_buffer);
555 avcodec_free_context(&ist->dec_ctx);
557 av_freep(&input_streams[i]);
/* fclose flushes buffered stats; a failure may mean lost data. */
561 if (fclose(vstats_file))
562 av_log(NULL, AV_LOG_ERROR,
563 "Error closing vstats file, loss of information possible: %s\n",
564 av_err2str(AVERROR(errno)));
566 av_freep(&vstats_filename);
568 av_freep(&input_streams);
569 av_freep(&input_files);
570 av_freep(&output_streams);
571 av_freep(&output_files);
575 avformat_network_deinit();
/* Explain why we are exiting. */
577 if (received_sigterm) {
578 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579 (int) received_sigterm);
580 } else if (ret && transcode_init_done) {
581 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b (case-sensitive match).
 * Used to strip options already consumed elsewhere. */
587 void remove_avoptions(AVDictionary **a, AVDictionary *b)
589 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
591 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort (fatal log; exit elided in this dump) if any option in m was not
 * consumed — i.e. the user passed an option no component recognized. */
596 void assert_avoptions(AVDictionary *m)
598 AVDictionaryEntry *t;
/* Any remaining entry means an unrecognized option. */
599 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Presumably reports that codec c (encoder or decoder, per `encoder`) is
 * experimental and terminates — the body is elided in this dump; confirm
 * against the full source. */
605 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, print the CPU time elapsed since the previous call,
 * labelled by the printf-style fmt; also refreshes current_time (elided).
 * A NULL fmt (see callers) just resets the timer. */
610 static void update_benchmark(const char *fmt, ...)
612 if (do_benchmark_all) {
613 int64_t t = getutime();
619 vsnprintf(buf, sizeof(buf), fmt, va);
621 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `ost` itself gets this_stream, all
 * other streams get others. Used when the muxer dies to stop all encoders. */
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
630 for (i = 0; i < nb_output_streams; i++) {
631 OutputStream *ost2 = output_streams[i];
632 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: sanitize timestamps, enforce -frames
 * limits, gather video quality stats, fix non-monotonic DTS, then hand the
 * packet to av_interleaved_write_frame(). Consumes the packet either way.
 * NOTE(review): several guards/braces are elided in this dump. */
636 static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
638 AVStream *st = ost->st;
/* VSYNC_DROP / negative async discard timestamps entirely. */
641 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
642 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
643 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
646 * Audio encoders may split the packets -- #frames in != #packets out.
647 * But there is no reordering, so we can limit the number of output packets
648 * by simply dropping them here.
649 * Counting encoded video frames needs to be done separately because of
650 * reordering, see do_video_out()
652 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
653 if (ost->frame_number >= ost->max_frames) {
654 av_packet_unref(pkt);
/* Pull encoder quality/error side data for -vstats / status display. */
659 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
661 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
663 ost->quality = sd ? AV_RL32(sd) : -1;
664 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
666 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
668 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* For CFR output the duration must match the frame rate exactly. */
673 if (ost->frame_rate.num && ost->is_cfr) {
674 if (pkt->duration > 0)
675 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
676 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
681 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* DTS must never exceed PTS; rebuild a plausible pair if it does. */
682 if (pkt->dts != AV_NOPTS_VALUE &&
683 pkt->pts != AV_NOPTS_VALUE &&
684 pkt->dts > pkt->pts) {
685 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
687 ost->file_index, ost->st->index);
/* sum - min - max == the middle value of the three candidates. */
689 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
690 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
691 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream. */
693 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
694 pkt->dts != AV_NOPTS_VALUE &&
695 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
696 ost->last_mux_dts != AV_NOPTS_VALUE) {
697 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
698 if (pkt->dts < max) {
699 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
700 av_log(s, loglevel, "Non-monotonous DTS in output stream "
701 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
702 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* Elided guard: presumably exit_on_error aborts here. */
704 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
707 av_log(s, loglevel, "changing to %"PRId64". This may result "
708 "in incorrect timestamps in the output file.\n",
710 if (pkt->pts >= pkt->dts)
711 pkt->pts = FFMAX(pkt->pts, max);
716 ost->last_mux_dts = pkt->dts;
718 ost->data_size += pkt->size;
719 ost->packets_written++;
721 pkt->stream_index = ost->index;
/* -debug_ts trace (guard elided). */
724 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
725 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
726 av_get_media_type_string(ost->enc_ctx->codec_type),
727 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
728 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
733 ret = av_interleaved_write_frame(s, pkt);
/* On mux failure, record the error and shut down all streams. */
735 print_error("av_interleaved_write_frame()", ret);
736 main_return_code = 1;
737 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
739 av_packet_unref(pkt);
/* Mark the encoder of this stream finished and (in an elided branch,
 * presumably when shortest-output applies) clamp the owning file's
 * recording_time to the stream's current end time. */
742 static void close_output_stream(OutputStream *ost)
744 OutputFile *of = output_files[ost->file_index];
746 ost->finished |= ENCODER_FINISHED;
748 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
749 of->recording_time = FFMIN(of->recording_time, end);
/* Run a packet through the stream's bitstream-filter chain (if any) and
 * then mux it via write_packet(). The idx/loop machinery that walks the
 * chain is partly elided in this dump. */
753 static void output_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
757 /* apply the output bitstream filters, if any */
758 if (ost->nb_bitstream_filters) {
761 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
767 /* get a packet from the previous filter up the chain */
768 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
769 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
770 * the api states this shouldn't happen after init(). Propagate it here to the
771 * muxer and to the next filters in the chain to workaround this.
772 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
773 * par_out->extradata and adapt muxers accordingly to get rid of this. */
774 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
775 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 0: stream codecpar refreshed from this filter once. */
778 ost->bsf_extradata_updated[idx - 1] |= 1;
780 if (ret == AVERROR(EAGAIN)) {
787 /* send it to the next filter down the chain or to the muxer */
788 if (idx < ost->nb_bitstream_filters) {
789 /* HACK/FIXME! - See above */
790 if (!(ost->bsf_extradata_updated[idx] & 2)) {
791 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 1: next filter's par_out refreshed once. */
794 ost->bsf_extradata_updated[idx] |= 2;
796 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
801 write_packet(s, pkt, ost);
/* No bitstream filters: mux directly. */
804 write_packet(s, pkt, ost);
807 if (ret < 0 && ret != AVERROR_EOF) {
808 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
809 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still emit output under the file's -t
 * limit; closes the stream and (elided) returns 0 once the limit is hit. */
815 static int check_recording_time(OutputStream *ost)
817 OutputFile *of = output_files[ost->file_index];
/* Compare stream position (encoder time base) against recording_time
 * (AV_TIME_BASE units) without an explicit rescale. */
819 if (of->recording_time != INT64_MAX &&
820 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
821 AV_TIME_BASE_Q) >= 0) {
822 close_output_stream(ost);
/* Encode one audio frame with the send-frame/receive-packet API and mux
 * every packet produced. Advances ost->sync_opts by nb_samples so audio
 * timestamps stay sample-accurate. NOTE(review): the receive loop's braces,
 * error paths and goto labels are elided in this dump. */
828 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
831 AVCodecContext *enc = ost->enc_ctx;
835 av_init_packet(&pkt);
/* Respect -t on the output file. */
839 if (!check_recording_time(ost))
/* Without usable pts (or with forced resync), continue the running count. */
842 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
843 frame->pts = ost->sync_opts;
844 ost->sync_opts = frame->pts + frame->nb_samples;
845 ost->samples_encoded += frame->nb_samples;
846 ost->frames_encoded++;
848 av_assert0(pkt.size || !pkt.data);
849 update_benchmark(NULL);
/* -debug_ts trace (guard elided). */
851 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
852 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
853 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
854 enc->time_base.num, enc->time_base.den);
857 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
862 ret = avcodec_receive_packet(enc, &pkt);
863 if (ret == AVERROR(EAGAIN))
868 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> muxer (stream) time base. */
870 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
873 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
874 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
875 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
876 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
879 output_packet(s, &pkt, ost);
884 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode an AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (draw packet + clear packet); timestamps are shifted to honor
 * -ss/-t. NOTE(review): the enc/sub/pts parameter list and several
 * branches are elided in this dump. */
888 static void do_subtitle_out(AVFormatContext *s,
893 int subtitle_out_max_size = 1024 * 1024;
894 int subtitle_out_size, nb, i;
899 if (sub->pts == AV_NOPTS_VALUE) {
900 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazy one-time allocation of the shared scratch buffer. */
909 subtitle_out = av_malloc(subtitle_out_max_size);
911 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
916 /* Note: DVB subtitle need one packet to draw them and one other
917 packet to clear them */
918 /* XXX: signal it in the codec context ? */
919 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
924 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
926 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
927 pts -= output_files[ost->file_index]->start_time;
/* nb == 2 for DVB (draw + clear), otherwise 1 (assignment elided). */
928 for (i = 0; i < nb; i++) {
929 unsigned save_num_rects = sub->num_rects;
931 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
932 if (!check_recording_time(ost))
936 // start_display_time is required to be 0
937 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
938 sub->end_display_time -= sub->start_display_time;
939 sub->start_display_time = 0;
943 ost->frames_encoded++;
945 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
946 subtitle_out_max_size, sub);
/* Restore rect count possibly zeroed for the DVB "clear" pass. */
948 sub->num_rects = save_num_rects;
949 if (subtitle_out_size < 0) {
950 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
954 av_init_packet(&pkt);
955 pkt.data = subtitle_out;
956 pkt.size = subtitle_out_size;
957 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
/* Display times are in milliseconds. */
958 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
959 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
960 /* XXX: the pts correction is handled here. Maybe handling
961 it in the codec would be better */
/* 90 kHz ticks: draw packet at start, clear packet at end. */
963 pkt.pts += 90 * sub->start_display_time;
965 pkt.pts += 90 * sub->end_display_time;
968 output_packet(s, &pkt, ost);
/* Core video output path: applies the selected vsync policy (CFR/VFR/
 * passthrough/drop) to decide how many times to emit/duplicate/drop the
 * incoming filtered frame, handles forced keyframes, then encodes and
 * muxes each emitted frame. next_picture == NULL flushes using the
 * recent duplication history. NOTE(review): numerous lines (locals such
 * as sync_ipts/duration/pkt, case labels, braces, goto targets) are
 * elided in this dump — statement order below is not complete. */
972 static void do_video_out(AVFormatContext *s,
974 AVFrame *next_picture,
977 int ret, format_video_sync;
979 AVCodecContext *enc = ost->enc_ctx;
980 AVCodecParameters *mux_par = ost->st->codecpar;
981 int nb_frames, nb0_frames, i;
982 double delta, delta0;
985 InputStream *ist = NULL;
986 AVFilterContext *filter = ost->filter->filter;
988 if (ost->source_index >= 0)
989 ist = input_streams[ost->source_index];
/* Frame duration in encoder time-base units, derived from the filter
 * output frame rate when known. */
991 if (filter->inputs[0]->frame_rate.num > 0 &&
992 filter->inputs[0]->frame_rate.den > 0)
993 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
995 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
996 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the frame's own packet duration when no filter script rewrites timing. */
998 if (!ost->filters_script &&
1002 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1003 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush: estimate remaining duplicates from the last three frames. */
1006 if (!next_picture) {
1008 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1009 ost->last_nb0_frames[1],
1010 ost->last_nb0_frames[2]);
1012 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1013 delta = delta0 + duration;
1015 /* by default, we output a single frame */
1016 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete policy based on the muxer. */
1019 format_video_sync = video_sync_method;
1020 if (format_video_sync == VSYNC_AUTO) {
1021 if(!strcmp(s->oformat->name, "avi")) {
1022 format_video_sync = VSYNC_VFR;
1024 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1026 && format_video_sync == VSYNC_CFR
1027 && input_files[ist->file_index]->ctx->nb_streams == 1
1028 && input_files[ist->file_index]->input_ts_offset == 0) {
1029 format_video_sync = VSYNC_VSCFR;
1031 if (format_video_sync == VSYNC_CFR && copy_ts) {
1032 format_video_sync = VSYNC_VSCFR;
1035 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip frames that would land before the current output position. */
1039 format_video_sync != VSYNC_PASSTHROUGH &&
1040 format_video_sync != VSYNC_DROP) {
1041 if (delta0 < -0.6) {
1042 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1044 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1045 sync_ipts = ost->sync_opts;
/* Per-policy duplicate/drop decision (case labels partly elided). */
1050 switch (format_video_sync) {
1052 if (ost->frame_number == 0 && delta0 >= 0.5) {
1053 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1056 ost->sync_opts = lrint(sync_ipts);
1059 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1060 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1062 } else if (delta < -1.1)
1064 else if (delta > 1.1) {
1065 nb_frames = lrintf(delta);
1067 nb0_frames = lrintf(delta0 - 0.6);
1073 else if (delta > 0.6)
1074 ost->sync_opts = lrint(sync_ipts);
1077 case VSYNC_PASSTHROUGH:
1078 ost->sync_opts = lrint(sync_ipts);
/* Honor -frames; never duplicate more than we will emit. */
1085 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1086 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the 3-entry duplication history. */
1088 memmove(ost->last_nb0_frames + 1,
1089 ost->last_nb0_frames,
1090 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1091 ost->last_nb0_frames[0] = nb0_frames;
1093 if (nb0_frames == 0 && ost->last_dropped) {
1095 av_log(NULL, AV_LOG_VERBOSE,
1096 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1097 ost->frame_number, ost->st->index, ost->last_frame->pts);
1099 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1100 if (nb_frames > dts_error_threshold * 30) {
1101 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1105 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1106 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1108 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1110 /* duplicates frame if needed */
1111 for (i = 0; i < nb_frames; i++) {
1112 AVFrame *in_picture;
1113 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame. */
1117 if (i < nb0_frames && ost->last_frame) {
1118 in_picture = ost->last_frame;
1120 in_picture = next_picture;
1125 in_picture->pts = ost->sync_opts;
1128 if (!check_recording_time(ost))
1130 if (ost->frame_number >= ost->max_frames)
1134 #if FF_API_LAVF_FMT_RAWPICTURE
/* Deprecated raw-picture path: ship the AVPicture struct itself. */
1135 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1136 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1137 /* raw pictures are written as AVPicture structure to
1138 avoid any copies. We support temporarily the older
1140 if (in_picture->interlaced_frame)
1141 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1144 pkt.data = (uint8_t *)in_picture;
1145 pkt.size = sizeof(AVPicture);
1146 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1147 pkt.flags |= AV_PKT_FLAG_KEY;
1149 output_packet(s, &pkt, ost);
/* Normal encode path. */
1153 int forced_keyframe = 0;
1156 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1157 ost->top_field_first >= 0)
1158 in_picture->top_field_first = !!ost->top_field_first;
1160 if (in_picture->interlaced_frame) {
/* MJPEG signals field order differently from other codecs. */
1161 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1162 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1164 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1166 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1168 in_picture->quality = enc->global_quality;
/* Let the encoder choose the picture type unless forced below. */
1169 in_picture->pict_type = 0;
1171 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1172 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* -force_key_frames: explicit timestamp list... */
1173 if (ost->forced_kf_index < ost->forced_kf_count &&
1174 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1175 ost->forced_kf_index++;
1176 forced_keyframe = 1;
/* ...or an expression evaluated per frame... */
1177 } else if (ost->forced_keyframes_pexpr) {
1179 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1180 res = av_expr_eval(ost->forced_keyframes_pexpr,
1181 ost->forced_keyframes_expr_const_values, NULL);
1182 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1183 ost->forced_keyframes_expr_const_values[FKF_N],
1184 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1185 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1186 ost->forced_keyframes_expr_const_values[FKF_T],
1187 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1190 forced_keyframe = 1;
1191 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1192 ost->forced_keyframes_expr_const_values[FKF_N];
1193 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1194 ost->forced_keyframes_expr_const_values[FKF_T];
1195 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1198 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
/* ...or "source": mirror keyframes of the input. */
1199 } else if ( ost->forced_keyframes
1200 && !strncmp(ost->forced_keyframes, "source", 6)
1201 && in_picture->key_frame==1) {
1202 forced_keyframe = 1;
1205 if (forced_keyframe) {
1206 in_picture->pict_type = AV_PICTURE_TYPE_I;
1207 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1210 update_benchmark(NULL);
/* -debug_ts trace (guard elided). */
1212 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1213 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1214 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1215 enc->time_base.num, enc->time_base.den);
1218 ost->frames_encoded++;
1220 ret = avcodec_send_frame(enc, in_picture);
/* Drain the encoder. */
1225 ret = avcodec_receive_packet(enc, &pkt);
1226 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1227 if (ret == AVERROR(EAGAIN))
1233 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1234 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1235 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1236 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may legitimately omit pts. */
1239 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1240 pkt.pts = ost->sync_opts;
1242 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1245 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1246 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1247 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1248 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1251 frame_size = pkt.size;
1252 output_packet(s, &pkt, ost);
1254 /* if two pass, output log */
1255 if (ost->logfile && enc->stats_out) {
1256 fprintf(ost->logfile, "%s", enc->stats_out);
1262 * For video, number of frames in == number of packets out.
1263 * But there may be reordering, so we can't throw away frames on encoder
1264 * flush, we need to limit them here, before they go into encoder.
1266 ost->frame_number++;
1268 if (vstats_filename && frame_size)
1269 do_video_stats(ost, frame_size);
/* Keep a reference to the frame for possible duplication next call. */
1272 if (!ost->last_frame)
1273 ost->last_frame = av_frame_alloc();
1274 av_frame_unref(ost->last_frame);
1275 if (next_picture && ost->last_frame)
1276 av_frame_ref(ost->last_frame, next_picture);
1278 av_frame_free(&ost->last_frame);
1282 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1286 static double psnr(double d)
1288 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quality, PSNR, sizes, bitrates,
 * picture type) for the encoded video frame to the -vstats file, opening
 * the file on first use. NOTE(review): locals (frame_number), the fopen
 * error path and enc assignment are elided in this dump. */
1291 static void do_video_stats(OutputStream *ost, int frame_size)
1293 AVCodecContext *enc;
1295 double ti1, bitrate, avg_bitrate;
1297 /* this is executed just the first time do_video_stats is called */
1299 vstats_file = fopen(vstats_filename, "w");
1307 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1308 frame_number = ost->st->nb_frames;
1309 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1310 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to compute error (FLAG_PSNR). */
1312 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1313 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1315 fprintf(vstats_file,"f_size= %6d ", frame_size);
1316 /* compute pts value */
1317 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes/time. */
1321 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1322 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1323 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1324 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1325 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1329 static void finish_output_stream(OutputStream *ost)
1331 OutputFile *of = output_files[ost->file_index];
1334 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1337 for (i = 0; i < of->ctx->nb_streams; i++)
1338 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1343 * Get and encode new output from any of the filtergraphs, without causing
1346 * @return 0 for success, <0 for severe errors
1348 static int reap_filters(int flush)
1350 AVFrame *filtered_frame = NULL;
1353 /* Reap all buffers present in the buffer sinks */
1354 for (i = 0; i < nb_output_streams; i++) {
1355 OutputStream *ost = output_streams[i];
1356 OutputFile *of = output_files[ost->file_index];
1357 AVFilterContext *filter;
1358 AVCodecContext *enc = ost->enc_ctx;
1363 filter = ost->filter->filter;
/* lazily allocate the per-stream reusable frame for sink output */
1365 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1366 return AVERROR(ENOMEM);
1368 filtered_frame = ost->filtered_frame;
1371 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* non-blocking pull: NO_REQUEST means don't ask upstream for more frames */
1372 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1373 AV_BUFFERSINK_FLAG_NO_REQUEST);
1375 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1376 av_log(NULL, AV_LOG_WARNING,
1377 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1378 } else if (flush && ret == AVERROR_EOF) {
/* on EOF while flushing, push a NULL frame so the video path drains */
1379 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1380 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1384 if (ost->finished) {
1385 av_frame_unref(filtered_frame);
1388 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1389 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1390 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps sub-tick precision */
1391 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1393 tb.den <<= extra_bits;
1395 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1396 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1397 float_pts /= 1 << extra_bits;
1398 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1399 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1401 filtered_frame->pts =
1402 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1403 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1405 //if (ost->source_index >= 0)
1406 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1408 switch (filter->inputs[0]->type) {
1409 case AVMEDIA_TYPE_VIDEO:
/* unless the user forced an aspect ratio, propagate the filter's SAR */
1410 if (!ost->frame_aspect_ratio.num)
1411 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1414 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1415 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1417 enc->time_base.num, enc->time_base.den);
1420 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1422 case AVMEDIA_TYPE_AUDIO:
/* a channel-count change mid-stream is only valid if the encoder
 * advertises AV_CODEC_CAP_PARAM_CHANGE */
1423 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1424 enc->channels != av_frame_get_channels(filtered_frame)) {
1425 av_log(NULL, AV_LOG_ERROR,
1426 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1429 do_audio_out(of->ctx, ost, filtered_frame);
1432 // TODO support subtitle filters
1436 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet and
 * frame statistics.
 * NOTE(review): elided excerpt — braces and some statements between the
 * numbered lines are not shown. */
1443 static void print_final_stats(int64_t total_size)
1445 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1446 uint64_t subtitle_size = 0;
1447 uint64_t data_size = 0;
1448 float percent = -1.0;
1452 for (i = 0; i < nb_output_streams; i++) {
1453 OutputStream *ost = output_streams[i];
1454 switch (ost->enc_ctx->codec_type) {
1455 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1456 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1457 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1458 default: other_size += ost->data_size; break;
1460 extra_size += ost->enc_ctx->extradata_size;
1461 data_size += ost->data_size;
/* FIX: was the removed/deprecated CODEC_FLAG_PASS2; use the AV_ name so the
 * mask is consistent with AV_CODEC_FLAG_PASS1 (pass-1-only streams skip the
 * overhead accounting) */
1462 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1463 != AV_CODEC_FLAG_PASS1)
/* overhead percentage only meaningful when the muxed size is known */
1467 if (data_size && total_size>0 && total_size >= data_size)
1468 percent = 100.0 * (total_size - data_size) / data_size;
1470 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1471 video_size / 1024.0,
1472 audio_size / 1024.0,
1473 subtitle_size / 1024.0,
1474 other_size / 1024.0,
1475 extra_size / 1024.0);
1477 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1479 av_log(NULL, AV_LOG_INFO, "unknown");
1480 av_log(NULL, AV_LOG_INFO, "\n");
1482 /* print verbose per-stream stats */
1483 for (i = 0; i < nb_input_files; i++) {
1484 InputFile *f = input_files[i];
1485 uint64_t total_packets = 0, total_size = 0;
1487 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1488 i, f->ctx->filename);
1490 for (j = 0; j < f->nb_streams; j++) {
1491 InputStream *ist = input_streams[f->ist_index + j];
1492 enum AVMediaType type = ist->dec_ctx->codec_type;
1494 total_size += ist->data_size;
1495 total_packets += ist->nb_packets;
1497 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1498 i, j, media_type_string(type));
1499 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1500 ist->nb_packets, ist->data_size);
1502 if (ist->decoding_needed) {
1503 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1504 ist->frames_decoded);
1505 if (type == AVMEDIA_TYPE_AUDIO)
1506 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1507 av_log(NULL, AV_LOG_VERBOSE, "; ");
1510 av_log(NULL, AV_LOG_VERBOSE, "\n");
1513 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1514 total_packets, total_size);
1517 for (i = 0; i < nb_output_files; i++) {
1518 OutputFile *of = output_files[i];
1519 uint64_t total_packets = 0, total_size = 0;
1521 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1522 i, of->ctx->filename);
1524 for (j = 0; j < of->ctx->nb_streams; j++) {
1525 OutputStream *ost = output_streams[of->ost_index + j];
1526 enum AVMediaType type = ost->enc_ctx->codec_type;
1528 total_size += ost->data_size;
1529 total_packets += ost->packets_written;
1531 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1532 i, j, media_type_string(type));
1533 if (ost->encoding_needed) {
1534 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1535 ost->frames_encoded);
1536 if (type == AVMEDIA_TYPE_AUDIO)
1537 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1538 av_log(NULL, AV_LOG_VERBOSE, "; ");
1541 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1542 ost->packets_written, ost->data_size);
1544 av_log(NULL, AV_LOG_VERBOSE, "\n");
1547 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1548 total_packets, total_size);
/* warn loudly when nothing at all was produced */
1550 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1551 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1553 av_log(NULL, AV_LOG_WARNING, "\n");
1555 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic status line ("frame= ... fps= ... bitrate= ...") printed to stderr
 * and, when -progress is in use, a machine-readable key=value report written
 * to progress_avio.  Throttled to one update per 500ms unless this is the
 * final report.
 * NOTE(review): elided excerpt — declarations (buf, t, q, fps, ret, ...),
 * braces and early returns between numbered lines are not shown. */
1560 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1563 AVBPrint buf_script;
1565 AVFormatContext *oc;
1567 AVCodecContext *enc;
1568 int frame_number, vid, i;
1571 int64_t pts = INT64_MIN + 1;
1572 static int64_t last_time = -1;
1573 static int qp_histogram[52];
1574 int hours, mins, secs, us;
/* nothing to do if no console stats, no final report, and no -progress sink */
1578 if (!print_stats && !is_last_report && !progress_avio)
1581 if (!is_last_report) {
1582 if (last_time == -1) {
1583 last_time = cur_time;
/* rate-limit intermediate reports to every 500ms */
1586 if ((cur_time - last_time) < 500000)
1588 last_time = cur_time;
1591 t = (cur_time-timer_start) / 1000000.0;
1594 oc = output_files[0]->ctx;
1596 total_size = avio_size(oc->pb);
1597 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1598 total_size = avio_tell(oc->pb);
1602 av_bprint_init(&buf_script, 0, 1);
1603 for (i = 0; i < nb_output_streams; i++) {
1605 ost = output_streams[i];
1607 if (!ost->stream_copy)
1608 q = ost->quality / (float) FF_QP2LAMBDA;
/* additional video streams after the first only contribute their q value */
1610 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1611 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1612 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1613 ost->file_index, ost->index, q);
/* first video stream drives the frame/fps display */
1615 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1618 frame_number = ost->frame_number;
1619 fps = t > 1 ? frame_number / t : 0;
1620 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1621 frame_number, fps < 9.95, fps, q);
1622 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1623 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1624 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1625 ost->file_index, ost->index, q);
1627 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: one hex digit per QP bucket, log2 of the count */
1631 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1633 for (j = 0; j < 32; j++)
1634 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1637 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1639 double error, error_sum = 0;
1640 double scale, scale_sum = 0;
1642 char type[3] = { 'Y','U','V' };
1643 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1644 for (j = 0; j < 3; j++) {
/* final report uses the codec's accumulated error; intermediate
 * reports use the per-frame error */
1645 if (is_last_report) {
1646 error = enc->error[j];
1647 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1649 error = ost->error[j];
1650 scale = enc->width * enc->height * 255.0 * 255.0;
1656 p = psnr(error / scale);
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1658 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1659 ost->file_index, ost->index, type[j] | 32, p);
1661 p = psnr(error_sum / scale_sum);
1662 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1663 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1664 ost->file_index, ost->index, p);
1668 /* compute min output value */
1669 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1670 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1671 ost->st->time_base, AV_TIME_BASE_Q));
1673 nb_frames_drop += ost->last_dropped;
1676 secs = FFABS(pts) / AV_TIME_BASE;
1677 us = FFABS(pts) % AV_TIME_BASE;
1683 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1684 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1686 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1688 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1689 "size=%8.0fkB time=", total_size / 1024.0);
1691 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1692 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1693 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1694 (100 * us) / AV_TIME_BASE);
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1698 av_bprintf(&buf_script, "bitrate=N/A\n");
1700 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1701 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1704 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1705 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1706 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1707 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1708 hours, mins, secs, us);
1710 if (nb_frames_dup || nb_frames_drop)
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1712 nb_frames_dup, nb_frames_drop);
1713 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1714 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1717 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1718 av_bprintf(&buf_script, "speed=N/A\n");
1720 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1721 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1724 if (print_stats || is_last_report) {
/* intermediate reports end with \r so the console line is overwritten */
1725 const char end = is_last_report ? '\n' : '\r';
1726 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1727 fprintf(stderr, "%s %c", buf, end);
1729 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1734 if (progress_avio) {
1735 av_bprintf(&buf_script, "progress=%s\n",
1736 is_last_report ? "end" : "continue");
1737 avio_write(progress_avio, buf_script.str,
1738 FFMIN(buf_script.len, buf_script.size - 1));
1739 avio_flush(progress_avio);
1740 av_bprint_finalize(&buf_script, NULL);
1741 if (is_last_report) {
1742 if ((ret = avio_closep(&progress_avio)) < 0)
1743 av_log(NULL, AV_LOG_ERROR,
1744 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1749 print_final_stats(total_size);
/* Drain every encoder at end of input: send a NULL frame to signal EOF, then
 * receive and mux packets until the encoder returns AVERROR_EOF.
 * NOTE(review): elided excerpt — the loop variable declarations, `continue`
 * statements, the drain loop structure and braces are not shown. */
1752 static void flush_encoders(void)
1756 for (i = 0; i < nb_output_streams; i++) {
1757 OutputStream *ost = output_streams[i];
1758 AVCodecContext *enc = ost->enc_ctx;
1759 AVFormatContext *os = output_files[ost->file_index]->ctx;
1760 int stop_encoding = 0;
1762 if (!ost->encoding_needed)
/* PCM-like audio encoders (frame_size <= 1) have nothing buffered */
1765 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1767 #if FF_API_LAVF_FMT_RAWPICTURE
1768 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1772 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame = EOF signal to the encoder (new send/receive API) */
1775 avcodec_send_frame(enc, NULL);
1778 const char *desc = NULL;
1780 switch (enc->codec_type) {
1781 case AVMEDIA_TYPE_AUDIO:
1784 case AVMEDIA_TYPE_VIDEO:
1794 av_init_packet(&pkt);
1798 update_benchmark(NULL);
1799 ret = avcodec_receive_packet(enc, &pkt);
1800 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1801 if (ret < 0 && ret != AVERROR_EOF) {
1802 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: flush remaining rate-control stats to the log file */
1807 if (ost->logfile && enc->stats_out) {
1808 fprintf(ost->logfile, "%s", enc->stats_out);
1810 if (ret == AVERROR_EOF) {
1814 if (ost->finished & MUXER_FINISHED) {
1815 av_packet_unref(&pkt);
1818 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1819 pkt_size = pkt.size;
1820 output_packet(os, &pkt, ost);
1821 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1822 do_video_stats(ost, pkt_size);
1833 * Check whether a packet from ist should be written into ost at this time
/* Returns non-zero only when ost is fed by this exact input stream and the
 * packet is within the output's time window.
 * NOTE(review): elided excerpt — the `return 0;` / `return 1;` lines between
 * the numbered conditions are not shown. */
1835 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1837 OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, to compare with source_index */
1838 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1840 if (ost->source_index != ist_index)
/* drop packets that precede the output file's -ss start time */
1846 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Stream copy (-c copy): forward an input packet to the output, rescaling
 * timestamps into the output stream timebase and honoring -ss/-t windows,
 * without decoding or encoding.
 * NOTE(review): elided excerpt — `AVPacket opkt;` and other declarations,
 * `return` statements and braces between numbered lines are not shown. */
1852 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1854 OutputFile *of = output_files[ost->file_index];
1855 InputFile *f = input_files [ist->file_index];
1856 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1857 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1861 av_init_packet(&opkt);
/* by default skip leading non-keyframes so the output starts decodable */
1863 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1864 !ost->copy_initial_nonkeyframes)
1867 if (!ost->frame_number && !ost->copy_prior_start) {
1868 int64_t comp_start = start_time;
1869 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1870 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1871 if (pkt->pts == AV_NOPTS_VALUE ?
1872 ist->pts < comp_start :
1873 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output's -t recording window is exceeded */
1877 if (of->recording_time != INT64_MAX &&
1878 ist->pts >= of->recording_time + start_time) {
1879 close_output_stream(ost);
1883 if (f->recording_time != INT64_MAX) {
1884 start_time = f->ctx->start_time;
1885 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1886 start_time += f->start_time;
1887 if (ist->pts >= f->recording_time + start_time) {
1888 close_output_stream(ost);
1893 /* force the input stream PTS */
1894 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1897 if (pkt->pts != AV_NOPTS_VALUE)
1898 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1900 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the stream's running DTS when the packet carries none */
1902 if (pkt->dts == AV_NOPTS_VALUE)
1903 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1905 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1906 opkt.dts -= ost_tb_start_time;
/* audio: rescale with duration tracking to avoid rounding drift */
1908 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1909 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1911 duration = ist->dec_ctx->frame_size;
1912 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1913 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1914 ost->st->time_base) - ost_tb_start_time;
1917 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1918 opkt.flags = pkt->flags;
1919 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1920 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1921 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1922 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1923 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1925 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1926 &opkt.data, &opkt.size,
1927 pkt->data, pkt->size,
1928 pkt->flags & AV_PKT_FLAG_KEY);
1930 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* when the parser allocated a new buffer, take ownership of it */
1935 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1940 opkt.data = pkt->data;
1941 opkt.size = pkt->size;
1943 av_copy_packet_side_data(&opkt, pkt);
1945 #if FF_API_LAVF_FMT_RAWPICTURE
1946 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1947 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1948 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1949 /* store AVPicture in AVPacket, as expected by the output format */
1950 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1952 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1956 opkt.data = (uint8_t *)&pict;
1957 opkt.size = sizeof(AVPicture);
1958 opkt.flags |= AV_PKT_FLAG_KEY;
1962 output_packet(of->ctx, &opkt, ost);
/* If the decoder reported channels but no channel layout, pick the default
 * layout for that channel count (bounded by -guess_layout_max) and warn.
 * NOTE(review): elided excerpt — the `return 0;` failure paths and the final
 * `return 1;` between the numbered lines are not shown. */
1965 int guess_input_channel_layout(InputStream *ist)
1967 AVCodecContext *dec = ist->dec_ctx;
1969 if (!dec->channel_layout) {
1970 char layout_name[256];
/* refuse to guess for more channels than the user allows */
1972 if (dec->channels > ist->guess_layout_max)
1974 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1975 if (!dec->channel_layout)
1977 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1978 dec->channels, dec->channel_layout);
1979 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1980 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode error statistics and, with -xerror, abort on decode errors or
 * corrupt frames.  `ist` may be NULL (e.g. for subtitles).
 * NOTE(review): elided excerpt — the exit_program(1) calls and braces between
 * the numbered lines are not shown. */
1985 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
1987 if (*got_output || ret<0)
1988 decode_error_stat[ret<0] ++;
1990 if (ret < 0 && exit_on_error)
1993 if (exit_on_error && *got_output && ist) {
1994 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1995 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2001 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2002 // There is the following difference: if you got a frame, you must call
2003 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2004 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2005 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2012 ret = avcodec_send_packet(avctx, pkt);
2013 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2014 // decoded frames with avcodec_receive_frame() until done.
2015 if (ret < 0 && ret != AVERROR_EOF)
2019 ret = avcodec_receive_frame(avctx, frame);
2020 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Decode one audio packet (or drain with pkt==NULL), update the stream's
 * running timestamps, reconfigure filtergraphs on format changes, and push
 * the decoded frame into every filter input fed by this stream.
 * NOTE(review): elided excerpt — braces, `goto`/`return` and some statements
 * between the numbered lines are not shown. */
2028 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2030 AVFrame *decoded_frame, *f;
2031 AVCodecContext *avctx = ist->dec_ctx;
2032 int i, ret, err = 0, resample_changed;
2033 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode/filter frames */
2035 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2036 return AVERROR(ENOMEM);
2037 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2038 return AVERROR(ENOMEM);
2039 decoded_frame = ist->decoded_frame;
2041 update_benchmark(NULL);
2042 ret = decode(avctx, decoded_frame, got_output, pkt);
2043 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2045 if (ret >= 0 && avctx->sample_rate <= 0) {
2046 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2047 ret = AVERROR_INVALIDDATA;
2050 if (ret != AVERROR_EOF)
2051 check_decode_result(ist, got_output, ret);
2053 if (!*got_output || ret < 0)
2056 ist->samples_decoded += decoded_frame->nb_samples;
2057 ist->frames_decoded++;
2060 /* increment next_dts to use for the case where the input stream does not
2061 have timestamps or there are multiple frames in the packet */
2062 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2064 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in sample format, channel count/layout or rate */
2068 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2069 ist->resample_channels != avctx->channels ||
2070 ist->resample_channel_layout != decoded_frame->channel_layout ||
2071 ist->resample_sample_rate != decoded_frame->sample_rate;
2072 if (resample_changed) {
2073 char layout1[64], layout2[64];
2075 if (!guess_input_channel_layout(ist)) {
2076 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2077 "layout for Input Stream #%d.%d\n", ist->file_index,
2081 decoded_frame->channel_layout = avctx->channel_layout;
2083 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2084 ist->resample_channel_layout);
2085 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2086 decoded_frame->channel_layout);
2088 av_log(NULL, AV_LOG_INFO,
2089 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2090 ist->file_index, ist->st->index,
2091 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2092 ist->resample_channels, layout1,
2093 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2094 avctx->channels, layout2);
/* remember the new format and rebuild every graph this stream feeds */
2096 ist->resample_sample_fmt = decoded_frame->format;
2097 ist->resample_sample_rate = decoded_frame->sample_rate;
2098 ist->resample_channel_layout = decoded_frame->channel_layout;
2099 ist->resample_channels = avctx->channels;
2101 for (i = 0; i < nb_filtergraphs; i++)
2102 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2103 FilterGraph *fg = filtergraphs[i];
2104 if (configure_filtergraph(fg) < 0) {
2105 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2111 /* if the decoder provides a pts, use it instead of the last packet pts.
2112 the decoder could be delaying output by a packet or more. */
2113 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2114 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2115 decoded_frame_tb = avctx->time_base;
2116 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2117 decoded_frame->pts = decoded_frame->pkt_pts;
2118 decoded_frame_tb = ist->st->time_base;
2119 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2120 decoded_frame->pts = pkt->pts;
2121 decoded_frame_tb = ist->st->time_base;
2123 decoded_frame->pts = ist->dts;
2124 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into samples with drift compensation before filtering */
2126 if (decoded_frame->pts != AV_NOPTS_VALUE)
2127 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2128 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2129 (AVRational){1, avctx->sample_rate});
2130 ist->nb_samples = decoded_frame->nb_samples;
/* all filters but the last get a ref; the last consumes the frame itself */
2131 for (i = 0; i < ist->nb_filters; i++) {
2132 if (i < ist->nb_filters - 1) {
2133 f = ist->filter_frame;
2134 err = av_frame_ref(f, decoded_frame);
2139 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2140 AV_BUFFERSRC_FLAG_PUSH);
2141 if (err == AVERROR_EOF)
2142 err = 0; /* ignore */
2146 decoded_frame->pts = AV_NOPTS_VALUE;
2148 av_frame_unref(ist->filter_frame);
2149 av_frame_unref(decoded_frame);
2150 return err < 0 ? err : ret;
/* Decode one video packet (or drain with pkt==NULL / eof), derive a
 * best-effort timestamp (falling back to buffered DTS values at EOF),
 * reconfigure filtergraphs on size/format changes, and push the frame into
 * every filter input fed by this stream.
 * NOTE(review): elided excerpt — the local AVPacket avpkt, braces,
 * `return`/`goto` statements between the numbered lines are not shown. */
2153 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2155 AVFrame *decoded_frame, *f;
2156 int i, ret = 0, err = 0, resample_changed;
2157 int64_t best_effort_timestamp;
2158 int64_t dts = AV_NOPTS_VALUE;
2159 AVRational *frame_sample_aspect;
2162 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2163 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2165 if (!eof && pkt && pkt->size == 0)
2168 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2169 return AVERROR(ENOMEM);
2170 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2171 return AVERROR(ENOMEM);
2172 decoded_frame = ist->decoded_frame;
2173 if (ist->dts != AV_NOPTS_VALUE)
2174 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2177 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2180 // The old code used to set dts on the drain packet, which does not work
2181 // with the new API anymore.
/* queue the dts so it can serve as a timestamp for delayed frames at EOF */
2183 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2185 return AVERROR(ENOMEM);
2186 ist->dts_buffer = new;
2187 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2190 update_benchmark(NULL);
2191 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2192 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2194 // The following line may be required in some cases where there is no parser
2195 // or the parser does not has_b_frames correctly
2196 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2197 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2198 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2200 av_log(ist->dec_ctx, AV_LOG_WARNING,
2201 "video_delay is larger in decoder than demuxer %d > %d.\n"
2202 "If you want to help, upload a sample "
2203 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2204 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2205 ist->dec_ctx->has_b_frames,
2206 ist->st->codecpar->video_delay);
2209 if (ret != AVERROR_EOF)
2210 check_decode_result(ist, got_output, ret);
/* sanity check: the decoder context and the frame should agree */
2212 if (*got_output && ret >= 0) {
2213 if (ist->dec_ctx->width != decoded_frame->width ||
2214 ist->dec_ctx->height != decoded_frame->height ||
2215 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2216 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2217 decoded_frame->width,
2218 decoded_frame->height,
2219 decoded_frame->format,
2220 ist->dec_ctx->width,
2221 ist->dec_ctx->height,
2222 ist->dec_ctx->pix_fmt);
2226 if (!*got_output || ret < 0)
2229 if(ist->top_field_first>=0)
2230 decoded_frame->top_field_first = ist->top_field_first;
2232 ist->frames_decoded++;
/* download the frame from hardware memory if a hwaccel produced it */
2234 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2235 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2239 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2241 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* at EOF, fall back to the oldest queued demuxer dts for delayed frames */
2243 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2244 best_effort_timestamp = ist->dts_buffer[0];
2246 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2247 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2248 ist->nb_dts_buffer--;
2251 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2252 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2254 if (ts != AV_NOPTS_VALUE)
2255 ist->next_pts = ist->pts = ts;
2259 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2260 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2261 ist->st->index, av_ts2str(decoded_frame->pts),
2262 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2263 best_effort_timestamp,
2264 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2265 decoded_frame->key_frame, decoded_frame->pict_type,
2266 ist->st->time_base.num, ist->st->time_base.den);
2269 if (ist->st->sample_aspect_ratio.num)
2270 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect resolution or pixel-format changes and rebuild filtergraphs */
2272 resample_changed = ist->resample_width != decoded_frame->width ||
2273 ist->resample_height != decoded_frame->height ||
2274 ist->resample_pix_fmt != decoded_frame->format;
2275 if (resample_changed) {
2276 av_log(NULL, AV_LOG_INFO,
2277 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2278 ist->file_index, ist->st->index,
2279 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2280 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2282 ist->resample_width = decoded_frame->width;
2283 ist->resample_height = decoded_frame->height;
2284 ist->resample_pix_fmt = decoded_frame->format;
2286 for (i = 0; i < nb_filtergraphs; i++) {
2287 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2288 configure_filtergraph(filtergraphs[i]) < 0) {
2289 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2295 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* all filters but the last get a ref; the last consumes the frame itself */
2296 for (i = 0; i < ist->nb_filters; i++) {
2297 if (!frame_sample_aspect->num)
2298 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2300 if (i < ist->nb_filters - 1) {
2301 f = ist->filter_frame;
2302 err = av_frame_ref(f, decoded_frame);
2307 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2308 if (err == AVERROR_EOF) {
2309 err = 0; /* ignore */
2310 } else if (err < 0) {
2311 av_log(NULL, AV_LOG_FATAL,
2312 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2318 av_frame_unref(ist->filter_frame);
2319 av_frame_unref(decoded_frame);
2320 return err < 0 ? err : ret;
/* Decode a subtitle packet, optionally clamp the previous subtitle's display
 * duration (-fix_sub_duration), feed sub2video, and re-encode the subtitle
 * into every eligible output stream.
 * NOTE(review): elided excerpt — `int end` declaration, `return`/`goto`
 * statements and braces between the numbered lines are not shown. */
2323 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2325 AVSubtitle subtitle;
2326 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2327 &subtitle, got_output, pkt);
/* ist==NULL: subtitle frames are not checked for corruption flags */
2329 check_decode_result(NULL, got_output, ret);
2331 if (ret < 0 || !*got_output) {
2333 sub2video_flush(ist);
2337 if (ist->fix_sub_duration) {
2339 if (ist->prev_sub.got_output) {
/* duration of the previous subtitle in ms, ended by this one's pts */
2340 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2341 1000, AV_TIME_BASE);
2342 if (end < ist->prev_sub.subtitle.end_display_time) {
2343 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2344 "Subtitle duration reduced from %d to %d%s\n",
2345 ist->prev_sub.subtitle.end_display_time, end,
2346 end <= 0 ? ", dropping it" : "");
2347 ist->prev_sub.subtitle.end_display_time = end;
/* delay output by one subtitle: emit the previous one, stash the current */
2350 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2351 FFSWAP(int, ret, ist->prev_sub.ret);
2352 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2360 sub2video_update(ist, &subtitle);
2362 if (!subtitle.num_rects)
2365 ist->frames_decoded++;
2367 for (i = 0; i < nb_output_streams; i++) {
2368 OutputStream *ost = output_streams[i];
2370 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2371 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2374 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2378 avsubtitle_free(&subtitle);
/*
 * Mark every filtergraph input fed by 'ist' as finished by pushing a NULL
 * frame into each buffersrc (the libavfilter EOF convention).
 * NOTE(review): error handling and the return statement are elided in this
 * extraction (gaps in the embedded numbering).
 */
2382 static int send_filter_eof(InputStream *ist)
2385 for (i = 0; i < ist->nb_filters; i++) {
2386 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/*
 * Core per-packet driver for one input stream: decodes the packet (audio,
 * video or subtitle) and/or performs stream copy, while maintaining the
 * stream's pts/dts bookkeeping in AV_TIME_BASE units.
 *
 * ist    - input stream the packet belongs to
 * pkt    - input packet; NULL means EOF / flush the decoder
 * no_eof - when nonzero, flush decoders but do NOT send EOF to filters
 *          (used when looping input)
 * Returns nonzero while the stream should keep being processed
 * (i.e. !eof_reached).
 *
 * NOTE(review): this extraction has elided many lines (gaps in the embedded
 * numbering): local declarations, braces, break statements and parts of the
 * draining logic are missing. Comments describe only the visible code.
 */
2393 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2394 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2398 int eof_reached = 0;
/* First packet for this stream: seed dts from B-frame delay so the very
 * first timestamps are not ahead of the real presentation time. */
2401 if (!ist->saw_first_ts) {
2402 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2404 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2405 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2406 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2408 ist->saw_first_ts = 1;
2411 if (ist->next_dts == AV_NOPTS_VALUE)
2412 ist->next_dts = ist->dts;
2413 if (ist->next_pts == AV_NOPTS_VALUE)
2414 ist->next_pts = ist->pts;
2418 av_init_packet(&avpkt);
/* Resynchronize our AV_TIME_BASE clock to the packet's dts when present. */
2425 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2426 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2427 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2428 ist->next_pts = ist->pts = ist->dts;
2431 // while we have more to decode or while the decoder did output something on EOF
2432 while (ist->decoding_needed) {
2436 ist->pts = ist->next_pts;
2437 ist->dts = ist->next_dts;
/* Dispatch on media type; 'repeating' passes NULL to drain buffered frames
 * instead of re-sending the same packet (declaration elided here). */
2439 switch (ist->dec_ctx->codec_type) {
2440 case AVMEDIA_TYPE_AUDIO:
2441 ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output);
2443 case AVMEDIA_TYPE_VIDEO:
2444 ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2445 if (!repeating || !pkt || got_output) {
/* Prefer the packet's own duration; otherwise derive it from the decoder
 * frame rate and repeat_pict (field-repeat) information. */
2446 if (pkt && pkt->duration) {
2447 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2448 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2449 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2450 duration = ((int64_t)AV_TIME_BASE *
2451 ist->dec_ctx->framerate.den * ticks) /
2452 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2455 if(ist->dts != AV_NOPTS_VALUE && duration) {
2456 ist->next_dts += duration;
2458 ist->next_dts = AV_NOPTS_VALUE;
2462 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2464 case AVMEDIA_TYPE_SUBTITLE:
2467 ret = transcode_subtitles(ist, &avpkt, &got_output);
2468 if (!pkt && ret >= 0)
/* AVERROR_EOF from the decoder is the normal end-of-stream signal
 * (handling elided); any other negative ret is a real decode error. */
2475 if (ret == AVERROR_EOF) {
2481 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2482 ist->file_index, ist->st->index, av_err2str(ret));
2485 // Decoding might not terminate if we're draining the decoder, and
2486 // the decoder keeps returning an error.
2487 // This should probably be considered a libavcodec issue.
2488 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2497 // During draining, we might get multiple output frames in this loop.
2498 // ffmpeg.c does not drain the filter chain on configuration changes,
2499 // which means if we send multiple frames at once to the filters, and
2500 // one of those frames changes configuration, the buffered frames will
2501 // be lost. This can upset certain FATE tests.
2502 // Decode only 1 frame per call on EOF to appease these FATE tests.
2503 // The ideal solution would be to rewrite decoding to use the new
2504 // decoding API in a better way.
2511 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2512 /* except when looping we need to flush but not to send an EOF */
2513 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2514 int ret = send_filter_eof(ist);
2516 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2521 /* handle stream copy */
2522 if (!ist->decoding_needed) {
/* Stream copy path: advance next_dts by an estimated frame/sample-block
 * duration since no decoder is producing timestamps for us. */
2523 ist->dts = ist->next_dts;
2524 switch (ist->dec_ctx->codec_type) {
2525 case AVMEDIA_TYPE_AUDIO:
2526 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2527 ist->dec_ctx->sample_rate;
2529 case AVMEDIA_TYPE_VIDEO:
2530 if (ist->framerate.num) {
2531 // TODO: Remove work-around for c99-to-c89 issue 7
2532 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap to the forced frame rate grid: round to frame index, step one. */
2533 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2534 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2535 } else if (pkt->duration) {
2536 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2537 } else if(ist->dec_ctx->framerate.num != 0) {
2538 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2539 ist->next_dts += ((int64_t)AV_TIME_BASE *
2540 ist->dec_ctx->framerate.den * ticks) /
2541 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2545 ist->pts = ist->dts;
2546 ist->next_pts = ist->next_dts;
/* Forward the packet to every output stream doing stream copy from ist. */
2548 for (i = 0; pkt && i < nb_output_streams; i++) {
2549 OutputStream *ost = output_streams[i];
2551 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2554 do_streamcopy(ist, ost, pkt);
2557 return !eof_reached;
/*
 * Build an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to the file named by the global 'sdp_filename'.
 * NOTE(review): allocation checks, the 'sdp' buffer declaration and cleanup
 * are elided in this extraction (gaps in the embedded numbering).
 */
2560 static void print_sdp(void)
2565 AVIOContext *sdp_pb;
2566 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the "rtp" muxer contexts; j counts how many were found. */
2570 for (i = 0, j = 0; i < nb_output_files; i++) {
2571 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2572 avc[j] = output_files[i]->ctx;
2580 av_sdp_create(avc, j, sdp, sizeof(sdp));
2582 if (!sdp_filename) {
2583 printf("SDP:\n%s\n", sdp);
/* Otherwise write the SDP to the requested file via avio. */
2586 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2587 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2589 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2590 avio_closep(&sdp_pb);
2591 av_freep(&sdp_filename);
/*
 * Look up the hwaccel descriptor matching a hardware pixel format in the
 * global, name-terminated 'hwaccels' table.
 * NOTE(review): the not-found return (presumably NULL) is elided here.
 */
2599 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2602 for (i = 0; hwaccels[i].name; i++)
2603 if (hwaccels[i].pix_fmt == pix_fmt)
2604 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * format list and pick the first hwaccel format that matches the user's
 * hwaccel selection, initializing the hwaccel on the way. Non-hwaccel
 * formats fall through (selection of the software format is elided).
 * NOTE(review): several branches (continue/break, final return) are elided
 * in this extraction; comments describe only the visible code.
 */
2608 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2610 InputStream *ist = s->opaque;
2611 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2614 for (p = pix_fmts; *p != -1; p++) {
2615 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2616 const HWAccel *hwaccel;
/* Software formats are not handled by this selection logic. */
2618 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2621 hwaccel = get_hwaccel(*p);
/* Skip formats whose hwaccel does not match the active/requested one. */
2623 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2624 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2627 ret = hwaccel->init(s);
/* If this hwaccel was explicitly requested, failure to init is fatal. */
2629 if (ist->hwaccel_id == hwaccel->id) {
2630 av_log(NULL, AV_LOG_FATAL,
2631 "%s hwaccel requested for input stream #%d:%d, "
2632 "but cannot be initialized.\n", hwaccel->name,
2633 ist->file_index, ist->st->index);
2634 return AV_PIX_FMT_NONE;
/* Propagate a pre-existing hw frames context into the decoder. */
2639 if (ist->hw_frames_ctx) {
2640 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2641 if (!s->hw_frames_ctx)
2642 return AV_PIX_FMT_NONE;
/* Record the chosen hwaccel and its pixel format on the input stream. */
2645 ist->active_hwaccel_id = hwaccel->id;
2646 ist->hwaccel_pix_fmt   = *p;
/*
 * AVCodecContext.get_buffer2 callback: route frame allocation to the
 * hwaccel's allocator when the frame uses the hwaccel pixel format,
 * otherwise fall back to libavcodec's default allocator.
 */
2653 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2655 InputStream *ist = s->opaque;
2657 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2658 return ist->hwaccel_get_buffer(s, frame, flags);
2660 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' (when decoding is needed)
 * and install the hwaccel-aware callbacks. On failure a human-readable
 * message is written into 'error' (error_len bytes) and a negative
 * AVERROR code is returned.
 * NOTE(review): some lines are elided in this extraction (gaps in the
 * embedded numbering), including parts of the error paths.
 */
2663 static int init_input_stream(int ist_index, char *error, int error_len)
2666 InputStream *ist = input_streams[ist_index];
2668 if (ist->decoding_needed) {
2669 AVCodec *codec = ist->dec;
/* No decoder was found for this stream's codec (check elided above). */
2671 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2672 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2673 return AVERROR(EINVAL);
/* Hook up our format/buffer callbacks; opaque lets them find 'ist'. */
2676 ist->dec_ctx->opaque                = ist;
2677 ist->dec_ctx->get_format            = get_format;
2678 ist->dec_ctx->get_buffer2           = get_buffer;
2679 ist->dec_ctx->thread_safe_callbacks = 1;
2681 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles rendered for an output need exact display times. */
2682 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2683 (ist->decoding_needed & DECODING_FOR_OST)) {
2684 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2685 if (ist->decoding_needed & DECODING_FOR_FILTER)
2686 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2689 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2691 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2692 * audio, and video decoders such as cuvid or mediacodec */
2693 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic threading unless the user chose a thread count. */
2695 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2696 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2697 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2698 if (ret == AVERROR_EXPERIMENTAL)
2699 abort_codec_experimental(codec, 0);
2701 snprintf(error, error_len,
2702 "Error while opening decoder for input stream "
2704 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options the decoder did not consume are reported as errors. */
2707 assert_avoptions(ist->decoder_opts);
2710 ist->next_pts = AV_NOPTS_VALUE;
2711 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the input stream an output stream is mapped from, or (in the
 * elided fallthrough, presumably NULL) when the output has no direct
 * source stream (e.g. fed by a complex filtergraph).
 */
2716 static InputStream *get_input_stream(OutputStream *ost)
2718 if (ost->source_index >= 0)
2719 return input_streams[ost->source_index];
/* qsort comparator for int64_t values; FFDIFFSIGN avoids overflow that a
 * plain subtraction of two int64_t could cause. */
2723 static int compare_int64(const void *a, const void *b)
2725 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, then copy
 * the final output parameters/time base back onto the stream.
 * NOTE(review): early returns and some declarations are elided in this
 * extraction (gaps in the embedded numbering).
 */
2728 static int init_output_bsfs(OutputStream *ost)
2733 if (!ost->nb_bitstream_filters)
2736 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2737 ctx = ost->bsf_ctx[i];
/* Each filter's input parameters come from the previous filter's output,
 * or from the stream itself for the first filter in the chain. */
2739 ret = avcodec_parameters_copy(ctx->par_in,
2740 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2744 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2746 ret = av_bsf_init(ctx);
2748 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2749 ost->bsf_ctx[i]->filter->name);
/* The last filter's output defines what the muxer will actually see. */
2754 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2755 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2759 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in stream-copy mode: duplicate the source
 * stream's codec parameters (optionally overridden by user encoder
 * options), extradata, side data, timing info and per-media-type fields
 * into the output stream without decoding or encoding.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this extraction has elided lines (gaps in the embedded
 * numbering) — several braces, 'break' statements, error checks and
 * declarations (ret, i, sar, ...) are not visible. Comments describe only
 * the visible code.
 */
2764 static int init_output_stream_streamcopy(OutputStream *ost)
2766 OutputFile *of = output_files[ost->file_index];
2767 InputStream *ist = get_input_stream(ost);
2768 AVCodecParameters *par_dst = ost->st->codecpar;
2769 AVCodecParameters *par_src = ost->ref_par;
2772 uint64_t extra_size;
/* Stream copy requires a direct source stream and no filtergraph. */
2774 av_assert0(ist && !ost->filter);
/* Route the input parameters through enc_ctx so user-supplied encoder
 * options (-encoder_opts) can override individual fields. */
2776 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2777 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2779 av_log(NULL, AV_LOG_FATAL,
2780 "Error setting up codec context options.\n");
2783 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* 64-bit sum guards against overflow before the INT_MAX check below. */
2785 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2787 if (extra_size > INT_MAX) {
2788 return AVERROR(EINVAL);
2791 /* if stream_copy is selected, no need to decode or encode */
2792 par_dst->codec_id   = par_src->codec_id;
2793 par_dst->codec_type = par_src->codec_type;
/* Keep the source codec tag only if the output container can accept it. */
2795 if (!par_dst->codec_tag) {
2796 unsigned int codec_tag;
2797 if (!of->ctx->oformat->codec_tag ||
2798 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2799 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2800 par_dst->codec_tag = par_src->codec_tag;
2803 par_dst->bit_rate        = par_src->bit_rate;
2804 par_dst->field_order     = par_src->field_order;
2805 par_dst->chroma_location = par_src->chroma_location;
/* Copy extradata with zeroed padding (hence av_mallocz of extra_size). */
2807 if (par_src->extradata_size) {
2808 par_dst->extradata = av_mallocz(extra_size);
2809 if (!par_dst->extradata) {
2810 return AVERROR(ENOMEM);
2812 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2813 par_dst->extradata_size = par_src->extradata_size;
2815 par_dst->bits_per_coded_sample  = par_src->bits_per_coded_sample;
2816 par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2818 if (!ost->frame_rate.num)
2819 ost->frame_rate = ist->framerate;
2820 ost->st->avg_frame_rate = ost->frame_rate;
2822 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2826 // copy timebase while removing common factors
2827 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
/* Duplicate per-stream side data, skipping the display matrix when the
 * user overrode rotation on the command line. */
2829 if (ist->st->nb_side_data) {
2830 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2831 sizeof(*ist->st->side_data));
2832 if (!ost->st->side_data)
2833 return AVERROR(ENOMEM);
2835 ost->st->nb_side_data = 0;
2836 for (i = 0; i < ist->st->nb_side_data; i++) {
2837 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2838 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2840 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2843 sd_dst->data = av_malloc(sd_src->size);
2845 return AVERROR(ENOMEM);
2846 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2847 sd_dst->size = sd_src->size;
2848 sd_dst->type = sd_src->type;
2849 ost->st->nb_side_data++;
/* A parser is kept around to split/inspect copied packets. */
2853 ost->parser = av_parser_init(par_dst->codec_id);
2854 ost->parser_avctx = avcodec_alloc_context3(NULL);
2855 if (!ost->parser_avctx)
2856 return AVERROR(ENOMEM);
2858 switch (par_dst->codec_type) {
2859 case AVMEDIA_TYPE_AUDIO:
/* -vol implies decoding, which stream copy by definition skips. */
2860 if (audio_volume != 256) {
2861 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2864 par_dst->channel_layout     = par_src->channel_layout;
2865 par_dst->sample_rate        = par_src->sample_rate;
2866 par_dst->channels           = par_src->channels;
2867 par_dst->frame_size         = par_src->frame_size;
2868 par_dst->block_align        = par_src->block_align;
2869 par_dst->initial_padding    = par_src->initial_padding;
2870 par_dst->trailing_padding   = par_src->trailing_padding;
2871 par_dst->profile            = par_src->profile;
/* Bogus block_align values for MP3/AC3 would confuse muxers; clear them. */
2872 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2873 par_dst->block_align= 0;
2874 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2875 par_dst->block_align= 0;
2877 case AVMEDIA_TYPE_VIDEO:
2878 par_dst->format             = par_src->format;
2879 par_dst->color_space        = par_src->color_space;
2880 par_dst->color_range        = par_src->color_range;
2881 par_dst->color_primaries    = par_src->color_primaries;
2882 par_dst->color_trc          = par_src->color_trc;
2883 par_dst->width              = par_src->width;
2884 par_dst->height             = par_src->height;
2885 par_dst->video_delay        = par_src->video_delay;
2886 par_dst->profile            = par_src->profile;
/* Sample aspect ratio: -aspect override > source stream SAR > codec SAR. */
2887 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2889 av_mul_q(ost->frame_aspect_ratio,
2890 (AVRational){ par_dst->height, par_dst->width });
2891 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2892 "with stream copy may produce invalid files\n");
2894 else if (ist->st->sample_aspect_ratio.num)
2895 sar = ist->st->sample_aspect_ratio;
2897 sar = par_src->sample_aspect_ratio;
2898 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2899 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2900 ost->st->r_frame_rate = ist->st->r_frame_rate;
2902 case AVMEDIA_TYPE_SUBTITLE:
2903 par_dst->width  = par_src->width;
2904 par_dst->height = par_src->height;
2906 case AVMEDIA_TYPE_UNKNOWN:
2907 case AVMEDIA_TYPE_DATA:
2908 case AVMEDIA_TYPE_ATTACHMENT:
/*
 * Fully initialize one output stream: open its encoder (encoding path) or
 * run the stream-copy setup, then initialize the parser context and the
 * output bitstream filter chain. On failure a message is written into
 * 'error' (error_len bytes) and a negative AVERROR code is returned.
 *
 * NOTE(review): this extraction has elided lines (gaps in the embedded
 * numbering) — braces, early returns and some declarations are missing.
 * Comments describe only the visible code.
 */
2917 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2921 if (ost->encoding_needed) {
2922 AVCodec      *codec = ost->enc;
2923 AVCodecContext *dec = NULL;
2926 if ((ist = get_input_stream(ost)))
/* Carry the decoder's ASS subtitle header over to the encoder. */
2928 if (dec && dec->subtitle_header) {
2929 /* ASS code assumes this buffer is null terminated so add extra byte. */
2930 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2931 if (!ost->enc_ctx->subtitle_header)
2932 return AVERROR(ENOMEM);
2933 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2934 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2936 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2937 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the user specified none. */
2938 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2940 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2941 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2942 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hardware frames context flows from the filtergraph into the encoder. */
2944 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2945 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2946 if (!ost->enc_ctx->hw_frames_ctx)
2947 return AVERROR(ENOMEM);
2950 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2951 if (ret == AVERROR_EXPERIMENTAL)
2952 abort_codec_experimental(codec, 1);
2953 snprintf(error, error_len,
2954 "Error while opening encoder for output stream #%d:%d - "
2955 "maybe incorrect parameters such as bit_rate, rate, width or height",
2956 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit exactly
 * frame_size samples per frame. */
2959 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2960 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2961 av_buffersink_set_frame_size(ost->filter->filter,
2962 ost->enc_ctx->frame_size);
2963 assert_avoptions(ost->encoder_opts);
2964 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2965 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2966 " It takes bits/s as argument, not kbits/s\n");
2968 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
2970 av_log(NULL, AV_LOG_FATAL,
2971 "Error initializing the output stream codec context.\n");
2975 * FIXME: ost->st->codec should't be needed here anymore.
2977 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Copy any encoder-produced global side data onto the stream. */
2981 if (ost->enc_ctx->nb_coded_side_data) {
2984 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2985 sizeof(*ost->st->side_data));
2986 if (!ost->st->side_data)
2987 return AVERROR(ENOMEM);
2989 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2990 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2991 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2993 sd_dst->data = av_malloc(sd_src->size);
2995 return AVERROR(ENOMEM);
2996 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2997 sd_dst->size = sd_src->size;
2998 sd_dst->type = sd_src->type;
2999 ost->st->nb_side_data++;
3003 // copy timebase while removing common factors
3004 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3005 ost->st->codec->codec= ost->enc_ctx->codec;
3006 } else if (ost->stream_copy) {
3007 ret = init_output_stream_streamcopy(ost);
3012 * FIXME: will the codec context used by the parser during streamcopy
3013 * This should go away with the new parser API.
3015 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3020 /* initialize bitstream filters for the output stream
3021 * needs to be done here, because the codec id for streamcopy is not
3022 * known until now */
3023 ret = init_output_bsfs(ost);
/*
 * Parse the -force_key_frames argument 'kf' (a comma-separated list of
 * timestamps, where an entry "chapters[+offset]" expands to the chapter
 * start times of the output file) into a sorted array of pts values in
 * avctx->time_base units, stored on ost->forced_kf_pts/forced_kf_count.
 * Aborts via exit (elided exit_program calls) on allocation failure.
 *
 * NOTE(review): lines are elided in this extraction (gaps in the embedded
 * numbering): the counting of commas, 'p' advancement and some braces are
 * missing. Comments describe only the visible code.
 */
3030 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3031 AVCodecContext *avctx)
3034 int n = 1, i, size, index = 0;
/* First pass (partly elided): count entries to size the pts array. */
3037 for (p = kf; *p; p++)
3041 pts = av_malloc_array(size, sizeof(*pts));
3043 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3048 for (i = 0; i < n; i++) {
3049 char *next = strchr(p, ',');
/* "chapters" entry: one key frame per chapter, shifted by optional +t. */
3054 if (!memcmp(p, "chapters", 8)) {
3056 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1), guarding against int overflow. */
3059 if (avf->nb_chapters > INT_MAX - size ||
3060 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3062 av_log(NULL, AV_LOG_FATAL,
3063 "Could not allocate forced key frames array.\n");
3066 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3067 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3069 for (j = 0; j < avf->nb_chapters; j++) {
3070 AVChapter *c = avf->chapters[j];
3071 av_assert1(index < size);
3072 pts[index++] = av_rescale_q(c->start, c->time_base,
3073 avctx->time_base) + t;
/* Plain timestamp entry. */
3078 t = parse_time_or_die("force_key_frames", p, 1);
3079 av_assert1(index < size);
3080 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3087 av_assert0(index == size);
/* Keep the list sorted so the encoder loop can scan it linearly. */
3088 qsort(pts, size, sizeof(*pts), compare_int64);
3089 ost->forced_kf_count = size;
3090 ost->forced_kf_pts   = pts;
/*
 * Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the initial probe of input file 'input_index'.
 * nb_streams_warn tracks the highest index already reported.
 */
3093 static void report_new_stream(int input_index, AVPacket *pkt)
3095 InputFile *file = input_files[input_index];
3096 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or it was known from the start). */
3098 if (pkt->stream_index < file->nb_streams_warn)
3100 av_log(file->ctx, AV_LOG_WARNING,
3101 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3102 av_get_media_type_string(st->codecpar->codec_type),
3103 input_index, pkt->stream_index,
3104 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3105 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Populate the output stream's "encoder" metadata tag with the libavcodec
 * identification string plus the encoder name, unless the user already set
 * one. In bitexact mode (format or codec flag) only "Lavc <name>" is
 * written so output remains reproducible.
 * NOTE(review): a few lines (returns/braces) are elided in this extraction.
 */
3108 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3110 AVDictionaryEntry *e;
3112 uint8_t *encoder_string;
3113 int encoder_string_len;
3114 int format_flags = 0;
3115 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
3117 if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
3120 e = av_dict_get(of->opts, "fflags", NULL, 0);
3122 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3125 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3127 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3129 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3132 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one space separator, one terminating NUL. */
3135 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3136 encoder_string     = av_mallocz(encoder_string_len);
3137 if (!encoder_string)
3140 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3141 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3143 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3144 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary. */
3145 av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3146 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3149 static int transcode_init(void)
3151 int ret = 0, i, j, k;
3152 AVFormatContext *oc;
3155 char error[1024] = {0};
3158 for (i = 0; i < nb_filtergraphs; i++) {
3159 FilterGraph *fg = filtergraphs[i];
3160 for (j = 0; j < fg->nb_outputs; j++) {
3161 OutputFilter *ofilter = fg->outputs[j];
3162 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3164 if (fg->nb_inputs != 1)
3166 for (k = nb_input_streams-1; k >= 0 ; k--)
3167 if (fg->inputs[0]->ist == input_streams[k])
3169 ofilter->ost->source_index = k;
3173 /* init framerate emulation */
3174 for (i = 0; i < nb_input_files; i++) {
3175 InputFile *ifile = input_files[i];
3176 if (ifile->rate_emu)
3177 for (j = 0; j < ifile->nb_streams; j++)
3178 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3181 /* for each output stream, we compute the right encoding parameters */
3182 for (i = 0; i < nb_output_streams; i++) {
3183 ost = output_streams[i];
3184 oc = output_files[ost->file_index]->ctx;
3185 ist = get_input_stream(ost);
3187 if (ost->attachment_filename)
3191 ost->st->disposition = ist->st->disposition;
3193 for (j=0; j<oc->nb_streams; j++) {
3194 AVStream *st = oc->streams[j];
3195 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3198 if (j == oc->nb_streams)
3199 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3200 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3201 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3204 if (!ost->stream_copy) {
3205 AVCodecContext *enc_ctx = ost->enc_ctx;
3206 AVCodecContext *dec_ctx = NULL;
3208 set_encoder_id(output_files[ost->file_index], ost);
3211 dec_ctx = ist->dec_ctx;
3213 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3217 if (qsv_transcode_init(ost))
3222 if (cuvid_transcode_init(ost))
3226 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3227 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3228 filtergraph_is_simple(ost->filter->graph)) {
3229 FilterGraph *fg = ost->filter->graph;
3230 if (configure_filtergraph(fg)) {
3231 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3236 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3237 if (!ost->frame_rate.num)
3238 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3239 if (ist && !ost->frame_rate.num)
3240 ost->frame_rate = ist->framerate;
3241 if (ist && !ost->frame_rate.num)
3242 ost->frame_rate = ist->st->r_frame_rate;
3243 if (ist && !ost->frame_rate.num) {
3244 ost->frame_rate = (AVRational){25, 1};
3245 av_log(NULL, AV_LOG_WARNING,
3247 "about the input framerate is available. Falling "
3248 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3249 "if you want a different framerate.\n",
3250 ost->file_index, ost->index);
3252 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3253 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3254 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3255 ost->frame_rate = ost->enc->supported_framerates[idx];
3257 // reduce frame rate for mpeg4 to be within the spec limits
3258 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3259 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3260 ost->frame_rate.num, ost->frame_rate.den, 65535);
3264 switch (enc_ctx->codec_type) {
3265 case AVMEDIA_TYPE_AUDIO:
3266 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3268 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3269 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3270 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3271 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3272 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3273 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3275 case AVMEDIA_TYPE_VIDEO:
3276 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3277 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3278 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3279 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3280 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3281 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3282 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3284 for (j = 0; j < ost->forced_kf_count; j++)
3285 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3287 enc_ctx->time_base);
3289 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3290 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3291 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3292 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3293 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3294 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3295 if (!strncmp(ost->enc->name, "libx264", 7) &&
3296 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3297 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3298 av_log(NULL, AV_LOG_WARNING,
3299 "No pixel format specified, %s for H.264 encoding chosen.\n"
3300 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3301 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3302 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3303 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3304 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3305 av_log(NULL, AV_LOG_WARNING,
3306 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3307 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3308 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3309 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3311 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3312 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3314 ost->st->avg_frame_rate = ost->frame_rate;
3317 enc_ctx->width != dec_ctx->width ||
3318 enc_ctx->height != dec_ctx->height ||
3319 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3320 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3323 if (ost->forced_keyframes) {
3324 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3325 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3326 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3328 av_log(NULL, AV_LOG_ERROR,
3329 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3332 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3333 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3334 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3335 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3337 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3338 // parse it only for static kf timings
3339 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3340 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3344 case AVMEDIA_TYPE_SUBTITLE:
3345 enc_ctx->time_base = (AVRational){1, 1000};
3346 if (!enc_ctx->width) {
3347 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3348 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3351 case AVMEDIA_TYPE_DATA:
3359 if (ost->disposition) {
3360 static const AVOption opts[] = {
3361 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3362 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3363 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3364 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3365 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3366 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3367 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3368 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3369 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3370 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3371 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3372 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3373 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3374 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3377 static const AVClass class = {
3379 .item_name = av_default_item_name,
3381 .version = LIBAVUTIL_VERSION_INT,
3383 const AVClass *pclass = &class;
3385 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3391 /* init input streams */
3392 for (i = 0; i < nb_input_streams; i++)
3393 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3394 for (i = 0; i < nb_output_streams; i++) {
3395 ost = output_streams[i];
3396 avcodec_close(ost->enc_ctx);
3401 /* open each encoder */
3402 for (i = 0; i < nb_output_streams; i++) {
3403 ret = init_output_stream(output_streams[i], error, sizeof(error));
3408 /* discard unused programs */
3409 for (i = 0; i < nb_input_files; i++) {
3410 InputFile *ifile = input_files[i];
3411 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3412 AVProgram *p = ifile->ctx->programs[j];
3413 int discard = AVDISCARD_ALL;
3415 for (k = 0; k < p->nb_stream_indexes; k++)
3416 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3417 discard = AVDISCARD_DEFAULT;
3420 p->discard = discard;
3424 /* open files and write file headers */
3425 for (i = 0; i < nb_output_files; i++) {
3426 oc = output_files[i]->ctx;
3427 oc->interrupt_callback = int_cb;
3428 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3429 snprintf(error, sizeof(error),
3430 "Could not write header for output file #%d "
3431 "(incorrect codec parameters ?): %s",
3432 i, av_err2str(ret));
3433 ret = AVERROR(EINVAL);
3436 // assert_avoptions(output_files[i]->opts);
3437 if (strcmp(oc->oformat->name, "rtp")) {
3443 /* dump the file output parameters - cannot be done before in case
3445 for (i = 0; i < nb_output_files; i++) {
3446 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3449 /* dump the stream mapping */
3450 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3451 for (i = 0; i < nb_input_streams; i++) {
3452 ist = input_streams[i];
3454 for (j = 0; j < ist->nb_filters; j++) {
3455 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3456 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3457 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3458 ist->filters[j]->name);
3459 if (nb_filtergraphs > 1)
3460 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3461 av_log(NULL, AV_LOG_INFO, "\n");
3466 for (i = 0; i < nb_output_streams; i++) {
3467 ost = output_streams[i];
3469 if (ost->attachment_filename) {
3470 /* an attached file */
3471 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3472 ost->attachment_filename, ost->file_index, ost->index);
3476 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3477 /* output from a complex graph */
3478 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3479 if (nb_filtergraphs > 1)
3480 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3482 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3483 ost->index, ost->enc ? ost->enc->name : "?");
3487 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3488 input_streams[ost->source_index]->file_index,
3489 input_streams[ost->source_index]->st->index,
3492 if (ost->sync_ist != input_streams[ost->source_index])
3493 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3494 ost->sync_ist->file_index,
3495 ost->sync_ist->st->index);
3496 if (ost->stream_copy)
3497 av_log(NULL, AV_LOG_INFO, " (copy)");
3499 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3500 const AVCodec *out_codec = ost->enc;
3501 const char *decoder_name = "?";
3502 const char *in_codec_name = "?";
3503 const char *encoder_name = "?";
3504 const char *out_codec_name = "?";
3505 const AVCodecDescriptor *desc;
3508 decoder_name = in_codec->name;
3509 desc = avcodec_descriptor_get(in_codec->id);
3511 in_codec_name = desc->name;
3512 if (!strcmp(decoder_name, in_codec_name))
3513 decoder_name = "native";
3517 encoder_name = out_codec->name;
3518 desc = avcodec_descriptor_get(out_codec->id);
3520 out_codec_name = desc->name;
3521 if (!strcmp(encoder_name, out_codec_name))
3522 encoder_name = "native";
3525 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3526 in_codec_name, decoder_name,
3527 out_codec_name, encoder_name);
3529 av_log(NULL, AV_LOG_INFO, "\n");
3533 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3537 if (sdp_filename || want_sdp) {
3541 transcode_init_done = 1;
3546 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3547 static int need_output(void)
3551     for (i = 0; i < nb_output_streams; i++) {
3552         OutputStream *ost = output_streams[i];
3553         OutputFile *of = output_files[ost->file_index];
3554         AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Skip streams that are already finished, or whose output file has
 * reached its size limit (-fs): neither wants more output. */
3556         if (ost->finished ||
3557             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Once a stream hit its frame-count limit (-frames), close every
 * stream belonging to the same output file. */
3559         if (ost->frame_number >= ost->max_frames) {
3561             for (j = 0; j < of->ctx->nb_streams; j++)
3562                 close_output_stream(output_streams[of->ost_index + j]);
3573  * Select the output stream to process.
/* Selection rule (visible below): among non-finished streams, pick the one
 * with the smallest muxed dts so far, keeping output files interleaved. */
3575  * @return selected output stream, or NULL if none available
3577 static OutputStream *choose_output(void)
3580     int64_t opts_min = INT64_MAX;
3581     OutputStream *ost_min = NULL;
3583     for (i = 0; i < nb_output_streams; i++) {
3584         OutputStream *ost = output_streams[i];
/* Streams with no dts yet sort first (INT64_MIN) so they get primed. */
3585         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3586                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3588         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3589             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* An "unavailable" stream (its input returned EAGAIN) yields NULL so the
 * caller can back off instead of spinning on it. */
3591         if (!ost->finished && opts < opts_min) {
3593             ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if fd 0 is not a tty (tcgetattr fails).
 * Used around interactive command entry so typed characters are visible. */
3599 static void set_tty_echo(int on)
3603     if (tcgetattr(0, &tty) == 0) {
3604         if (on) tty.c_lflag |= ECHO;
3605         else tty.c_lflag &= ~ECHO;
3606         tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for interactive single-key commands while transcoding.
 * Returns AVERROR_EXIT to request shutdown (signal received or quit key),
 * 0 otherwise. Keys are read at most every 100ms and never in daemon mode.
 * NOTE(review): this excerpt elides some lines of the original body. */
3611 static int check_keyboard_interaction(int64_t cur_time)
3614 static int64_t last_time;
/* A pending signal always wins over keyboard handling. */
3615 if (received_nb_signals)
3616 return AVERROR_EXIT;
3617 /* read_key() returns 0 on EOF */
3618 if(cur_time - last_time >= 100000 && !run_as_daemon){
3620 last_time = cur_time;
3624 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity in steps of one level (10). */
3625 if (key == '+') av_log_set_level(av_log_get_level()+10);
3626 if (key == '-') av_log_set_level(av_log_get_level()-10);
3627 if (key == 's') qp_hist ^= 1;
3630 do_hex_dump = do_pkt_dump = 0;
3631 } else if(do_pkt_dump){
3635 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': prompt for "<target> <time> <command> [<arg>]" and forward it
 * to the filtergraphs ('c' = first matching filter only, 'C' = all). */
3637 if (key == 'c' || key == 'C'){
3638 char buf[4096], target[64], command[256], arg[256] = {0};
3641 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3644 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3649 fprintf(stderr, "\n");
3651 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3652 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3653 target, time, command, arg);
3654 for (i = 0; i < nb_filtergraphs; i++) {
3655 FilterGraph *fg = filtergraphs[i];
/* time == -1 (immediate): send now; otherwise queue for later. */
3658 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3659 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3660 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3661 } else if (key == 'c') {
3662 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3663 ret = AVERROR_PATCHWELCOME;
3665 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3667 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3672 av_log(NULL, AV_LOG_ERROR,
3673 "Parse error, at least 3 arguments were expected, "
3674 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle built-in codec debug modes; 'D' (elided branch) reads a
 * numeric debug value. The result is applied to all input decoder and
 * output encoder contexts below. */
3677 if (key == 'd' || key == 'D'){
3680 debug = input_streams[0]->st->codec->debug<<1;
3681 if(!debug) debug = 1;
3682 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3689 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3694 fprintf(stderr, "\n");
3695 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3696 fprintf(stderr,"error parsing debug value\n");
3698 for(i=0;i<nb_input_streams;i++) {
3699 input_streams[i]->st->codec->debug = debug;
3701 for(i=0;i<nb_output_streams;i++) {
3702 OutputStream *ost = output_streams[i];
3703 ost->enc_ctx->debug = debug;
3705 if(debug) av_log_set_level(AV_LOG_DEBUG);
3706 fprintf(stderr,"debug=%d\n", debug);
/* '?' (or any unrecognized key in the elided condition): print key help. */
3709 fprintf(stderr, "key function\n"
3710 "? show this help\n"
3711 "+ increase verbosity\n"
3712 "- decrease verbosity\n"
3713 "c Send command to first matching filter supporting it\n"
3714 "C Send/Que command to all matching filters\n"
3715 "D cycle through available debug modes\n"
3716 "h dump packets/hex press to cycle through the 3 states\n"
3718 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * hands them to the main thread through f->in_thread_queue.
 * Runs until read error/EOF or until the queue is shut down. */
3725 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable inputs (set in
 * init_input_threads) so a slow consumer cannot stall a live source. */
3728 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3733 ret = av_read_frame(f->ctx, &pkt);
3735 if (ret == AVERROR(EAGAIN)) {
/* Read failed/EOF: propagate the error to the receiving side and stop. */
3740 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3743 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: fall back to a blocking send once and
 * warn the user that -thread_queue_size may be too small. */
3744 if (flags && ret == AVERROR(EAGAIN)) {
3746 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3747 av_log(f->ctx, AV_LOG_WARNING,
3748 "Thread message queue blocking; consider raising the "
3749 "thread_queue_size option (current value: %d)\n",
3750 f->thread_queue_size);
/* Send failed: the packet was not consumed, so unref it here and tell the
 * receiver about the error before exiting the thread. */
3753 if (ret != AVERROR_EOF)
3754 av_log(f->ctx, AV_LOG_ERROR,
3755 "Unable to send packet to main thread: %s\n",
3757 av_packet_unref(&pkt);
3758 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all demuxer threads, draining and freeing their queues.
 * Safe to call when threading was never set up (NULL queue is skipped). */
3766 static void free_input_threads(void)
3770 for (i = 0; i < nb_input_files; i++) {
3771 InputFile *f = input_files[i];
3774 if (!f || !f->in_thread_queue)
/* Make any blocked sender in input_thread fail with EOF, then drain
 * queued packets so their buffers are released before the join. */
3776 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3777 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3778 av_packet_unref(&pkt);
3780 pthread_join(f->thread, NULL);
3782 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread (input_thread) per input file.
 * Skipped entirely for a single input, where threading buys nothing.
 * Returns 0 on success or a negative AVERROR code. */
3786 static int init_input_threads(void)
3790 if (nb_input_files == 1)
3793 for (i = 0; i < nb_input_files; i++) {
3794 InputFile *f = input_files[i];
/* Non-seekable inputs (and anything that is not the lavfi pseudo-demuxer)
 * get non-blocking queue sends so a live source is never stalled. */
3796 if (f->ctx->pb ? !f->ctx->pb->seekable :
3797 strcmp(f->ctx->iformat->name, "lavfi"))
3798 f->non_blocking = 1;
3799 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3800 f->thread_queue_size, sizeof(AVPacket));
/* pthread_create returns the error number directly (not via errno). */
3804 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3805 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3806 av_thread_message_queue_free(&f->in_thread_queue);
3807 return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue.
 * The elided condition selects AV_THREAD_MESSAGE_NONBLOCK, which makes
 * this return AVERROR(EAGAIN) instead of blocking when the queue is empty. */
3813 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3815 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3817 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file.
 * First enforces -re style rate limiting (the loop below compares each
 * stream's dts, rescaled to microseconds, against wall-clock time elapsed
 * since the stream started and returns EAGAIN if reading would be early).
 * Then reads from the demuxer thread queue (multi-input) or directly. */
3821 static int get_input_packet(InputFile *f, AVPacket *pkt)
3825 for (i = 0; i < f->nb_streams; i++) {
3826 InputStream *ist = input_streams[f->ist_index + i];
3827 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3828 int64_t now = av_gettime_relative() - ist->start;
3830 return AVERROR(EAGAIN);
/* Threaded path only exists when there is more than one input file
 * (init_input_threads skips the single-input case). */
3835 if (nb_input_files > 1)
3836 return get_input_packet_mt(f, pkt);
3838 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. its input recently returned EAGAIN (return lines elided here). */
3841 static int got_eagain(void)
3844 for (i = 0; i < nb_output_streams; i++)
3845 if (output_streams[i]->unavailable)
/* Clear all EAGAIN bookkeeping: per-input-file eagain flags and
 * per-output-stream unavailable flags, so the next transcode_step
 * considers every stream again. */
3850 static void reset_eagain(void)
3853 for (i = 0; i < nb_input_files; i++)
3854 input_files[i]->eagain = 0;
3855 for (i = 0; i < nb_output_streams; i++)
3856 output_streams[i]->unavailable = 0;
3859 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// Timestamps in different time bases are compared with av_compare_ts();
// *duration is overwritten with tmp (in the elided branch) when tmp is
// larger, and the winning value's time base is returned to the caller.
3860 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3861 AVRational time_base)
3867 return tmp_time_base;
3870 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3873 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek back, flush all
 * active decoders, then (re)compute the file's total duration so that
 * timestamps of the next iteration can be offset past the previous one.
 * Returns 0 on success or a negative AVERROR from the seek. */
3879 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3882 AVCodecContext *avctx;
3883 int i, ret, has_audio = 0;
3884 int64_t duration = 0;
3886 ret = av_seek_frame(is, -1, is->start_time, 0);
/* Pass 1: flush decoders (EOF-drain then buffer flush) and detect whether
 * any audio stream produced samples. */
3890 for (i = 0; i < ifile->nb_streams; i++) {
3891 ist = input_streams[ifile->ist_index + i];
3892 avctx = ist->dec_ctx;
3895 if (ist->decoding_needed) {
3896 process_input_packet(ist, NULL, 1);
3897 avcodec_flush_buffers(avctx);
3900 /* duration is the length of the last frame in a stream
3901 * when audio stream is present we don't care about
3902 * last video frame length because it's not defined exactly */
3903 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Pass 2: estimate the last-frame duration per stream. Audio uses the
 * exact decoded sample count; video falls back to one frame interval from
 * -r, then avg_frame_rate, then a 1-tick placeholder. */
3907 for (i = 0; i < ifile->nb_streams; i++) {
3908 ist = input_streams[ifile->ist_index + i];
3909 avctx = ist->dec_ctx;
3912 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3913 AVRational sample_rate = {1, avctx->sample_rate};
3915 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3919 if (ist->framerate.num) {
3920 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3921 } else if (ist->st->avg_frame_rate.num) {
3922 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3923 } else duration = 1;
3925 if (!ifile->duration)
3926 ifile->time_base = ist->st->time_base;
3927 /* the total duration of the stream, max_pts - min_pts is
3928 * the duration of the stream without the last frame */
3929 duration += ist->max_pts - ist->min_pts;
/* Keep the largest per-stream duration as the file duration. */
3930 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* loop > 0 means a finite loop count; the elided code decrements it. */
3934 if (ifile->loop > 0)
3942  * - 0 -- one packet was read and processed
3943  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3944  * this function should be called again
3945  * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, apply all timestamp
 * corrections (start-time, wrap, -itsoffset, -itsscale, loop offset,
 * discontinuity handling), then pass it on to decoding/streamcopy.
 * NOTE(review): this excerpt elides some lines of the original body. */
3947 static int process_input(int file_index)
3949 InputFile *ifile = input_files[file_index];
3950 AVFormatContext *is;
3958 ret = get_input_packet(ifile, &pkt);
3960 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure/EOF, seek back to the start and retry. */
3964 if (ret < 0 && ifile->loop) {
3965 if ((ret = seek_to_start(ifile, is)) < 0)
3967 ret = get_input_packet(ifile, &pkt);
3968 if (ret == AVERROR(EAGAIN)) {
3974 if (ret != AVERROR_EOF) {
3975 print_error(is->filename, ret);
/* EOF path: drain every active decoder of this file with a NULL packet. */
3980 for (i = 0; i < ifile->nb_streams; i++) {
3981 ist = input_streams[ifile->ist_index + i];
3982 if (ist->decoding_needed) {
3983 ret = process_input_packet(ist, NULL, 0);
3988 /* mark all outputs that don't go through lavfi as finished */
3989 for (j = 0; j < nb_output_streams; j++) {
3990 OutputStream *ost = output_streams[j];
3992 if (ost->source_index == ifile->ist_index + i &&
3993 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3994 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller's loop keeps servicing the
 * remaining (still-live) inputs and the filtergraph. */
3998 ifile->eof_reached = 1;
3999 return AVERROR(EAGAIN);
4005 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4006 is->streams[pkt.stream_index]);
4008 /* the following test is needed in case new streams appear
4009 dynamically in stream : we ignore them */
4010 if (pkt.stream_index >= ifile->nb_streams) {
4011 report_new_stream(file_index, &pkt);
4012 goto discard_packet;
4015 ist = input_streams[ifile->ist_index + pkt.stream_index];
4017 ist->data_size += pkt.size;
4021 goto discard_packet;
4023 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4024 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4029 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4030 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4031 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4032 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4033 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4034 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4035 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4036 av_ts2str(input_files[ist->file_index]->ts_offset),
4037 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp-wrap correction: only for streams whose pts can actually wrap
 * (pts_wrap_bits < 64) and while the file has a known start time. */
4040 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4041 int64_t stime, stime2;
4042 // Correcting starttime based on the enabled streams
4043 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4044 // so we instead do it here as part of discontinuity handling
4045 if ( ist->next_dts == AV_NOPTS_VALUE
4046 && ifile->ts_offset == -is->start_time
4047 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4048 int64_t new_start_time = INT64_MAX;
4049 for (i=0; i<is->nb_streams; i++) {
4050 AVStream *st = is->streams[i];
4051 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
/* Earliest start time over the non-discarded streams only. */
4053 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4055 if (new_start_time > is->start_time) {
4056 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4057 ifile->ts_offset = -new_start_time;
4061 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4062 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4063 ist->wrap_correction_done = 1;
/* A dts/pts more than half the wrap range past the start time is assumed
 * pre-wrap; unwrap it and keep correction pending for the other field. */
4065 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4066 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4067 ist->wrap_correction_done = 0;
4069 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4070 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4071 ist->wrap_correction_done = 0;
4075 /* add the stream-global side data to the first packet */
4076 if (ist->nb_packets == 1) {
4077 if (ist->st->nb_side_data)
4078 av_packet_split_side_data(&pkt);
4079 for (i = 0; i < ist->st->nb_side_data; i++) {
4080 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries, and let the
 * autorotate filter consume the display matrix instead. */
4083 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4085 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4088 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4092 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated file timestamp offset (start-time correction,
 * -itsoffset, discontinuity compensation), then the -itsscale factor. */
4096 if (pkt.dts != AV_NOPTS_VALUE)
4097 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4098 if (pkt.pts != AV_NOPTS_VALUE)
4099 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4101 if (pkt.pts != AV_NOPTS_VALUE)
4102 pkt.pts *= ist->ts_scale;
4103 if (pkt.dts != AV_NOPTS_VALUE)
4104 pkt.dts *= ist->ts_scale;
4106 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Inter-stream discontinuity check: a stream's very first dts jumping far
 * from the file's last seen ts (TS-like formats only) shifts ts_offset. */
4107 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4108 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4109 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4110 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4111 int64_t delta = pkt_dts - ifile->last_ts;
4112 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4113 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4114 ifile->ts_offset -= delta;
4115 av_log(NULL, AV_LOG_DEBUG,
4116 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4117 delta, ifile->ts_offset);
4118 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4119 if (pkt.pts != AV_NOPTS_VALUE)
4120 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: push timestamps of this iteration past the previous one
 * by the file duration computed in seek_to_start(). */
4124 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4125 if (pkt.pts != AV_NOPTS_VALUE) {
4126 pkt.pts += duration;
4127 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4128 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4131 if (pkt.dts != AV_NOPTS_VALUE)
4132 pkt.dts += duration;
4134 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
/* Intra-stream discontinuity check against the dts predicted from the
 * previous packet (ist->next_dts). */
4135 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4136 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4137 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4139 int64_t delta = pkt_dts - ist->next_dts;
4140 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4141 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4142 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4143 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4144 ifile->ts_offset -= delta;
4145 av_log(NULL, AV_LOG_DEBUG,
4146 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4147 delta, ifile->ts_offset);
4148 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4149 if (pkt.pts != AV_NOPTS_VALUE)
4150 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: timestamps beyond dts_error_threshold are treated
 * as bogus and dropped rather than used to shift the offset. */
4153 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4154 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4155 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4156 pkt.dts = AV_NOPTS_VALUE;
4158 if (pkt.pts != AV_NOPTS_VALUE){
4159 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4160 delta = pkt_pts - ist->next_dts;
4161 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4162 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4163 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4164 pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of the file for the inter-stream check above. */
4170 if (pkt.dts != AV_NOPTS_VALUE)
4171 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4174 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4175 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4176 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4177 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4178 av_ts2str(input_files[ist->file_index]->ts_offset),
4179 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4182 sub2video_heartbeat(ist, pkt.pts);
/* Hand the corrected packet to decoding or streamcopy, then release it. */
4184 process_input_packet(ist, &pkt, 0);
4187 av_packet_unref(&pkt);
4193  * Perform a step of transcoding for the specified filter graph.
4195  * @param[in] graph filter graph to consider
4196  * @param[out] best_ist input stream where a frame would allow to continue
4197  * @return 0 for success, <0 for error
4199 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4202 int nb_requests, nb_requests_max = 0;
4203 InputFilter *ifilter;
/* Ask the graph to produce output on its oldest sink; success means a
 * frame is ready, so just reap it. */
4207 ret = avfilter_graph_request_oldest(graph->graph);
4209 return reap_filters(0);
/* Graph fully drained: flush the sinks and close all its outputs. */
4211 if (ret == AVERROR_EOF) {
4212 ret = reap_filters(1);
4213 for (i = 0; i < graph->nb_outputs; i++)
4214 close_output_stream(graph->outputs[i]->ost);
4217 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph is starved. Pick the input whose buffersrc has the
 * most failed requests — feeding it is most likely to unblock the graph.
 * Inputs whose files are stalled (eagain) or exhausted are skipped. */
4220 for (i = 0; i < graph->nb_inputs; i++) {
4221 ifilter = graph->inputs[i];
4223 if (input_files[ist->file_index]->eagain ||
4224 input_files[ist->file_index]->eof_reached)
4226 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4227 if (nb_requests > nb_requests_max) {
4228 nb_requests_max = nb_requests;
/* No feedable input found (elided condition): mark the graph's outputs
 * unavailable so choose_output() backs off this iteration. */
4234 for (i = 0; i < graph->nb_outputs; i++)
4235 graph->outputs[i]->ost->unavailable = 1;
4241  * Run a single step of transcoding.
4243  * @return 0 for success, <0 for error
4245 static int transcode_step(void)
/* Pick the neediest output stream (smallest dts), then find which input
 * must be read to make progress on it. */
4251 ost = choose_output();
4258 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Complex-filtergraph outputs: let the graph decide which input to feed. */
4263 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Simple case: the output maps directly to one input stream. */
4268 av_assert0(ost->source_index >= 0);
4269 ist = input_streams[ost->source_index];
4272 ret = process_input(ist->file_index);
/* EAGAIN from the input only makes THIS output unavailable if its own
 * file stalled; other streams may still progress this round. */
4273 if (ret == AVERROR(EAGAIN)) {
4274 if (input_files[ist->file_index]->eagain)
4275 ost->unavailable = 1;
4280 return ret == AVERROR_EOF ? 0 : ret;
/* Drain whatever the filters produced from the packet just processed. */
4282 return reap_filters(0);
4286  * The following code is the main loop of the file converter
/* Drives the whole conversion: init, the step loop, flushing, trailers,
 * and teardown of encoders/decoders/per-stream allocations.
 * NOTE(review): this excerpt elides some lines of the original body. */
4288 static int transcode(void)
4291 AVFormatContext *os;
4294 int64_t timer_start;
4295 int64_t total_packets_written = 0;
4297 ret = transcode_init();
4301 if (stdin_interaction) {
4302 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4305 timer_start = av_gettime_relative();
4308 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until a termination signal, keyboard quit, no output
 * stream needs data, or a fatal error from transcode_step(). */
4312 while (!received_sigterm) {
4313 int64_t cur_time= av_gettime_relative();
4315 /* if 'q' pressed, exits */
4316 if (stdin_interaction)
4317 if (check_keyboard_interaction(cur_time) < 0)
4320 /* check if there's any stream where output is still needed */
4321 if (!need_output()) {
4322 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4326 ret = transcode_step();
4327 if (ret < 0 && ret != AVERROR_EOF) {
4329 av_strerror(ret, errbuf, sizeof(errbuf));
4331 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4335 /* dump report by using the output first video and audio streams */
4336 print_report(0, timer_start, cur_time);
4339 free_input_threads();
4342 /* at the end of stream, we must flush the decoder buffers */
4343 for (i = 0; i < nb_input_streams; i++) {
4344 ist = input_streams[i];
/* Only streams whose file never reached EOF still need a NULL-packet
 * drain; EOF'd files were already flushed in process_input(). */
4345 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4346 process_input_packet(ist, NULL, 0);
4353 /* write the trailer if needed and close file */
4354 for (i = 0; i < nb_output_files; i++) {
4355 os = output_files[i]->ctx;
4356 if ((ret = av_write_trailer(os)) < 0) {
4357 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4363 /* dump report by using the first video and audio streams */
4364 print_report(1, timer_start, av_gettime_relative());
4366 /* close each encoder */
4367 for (i = 0; i < nb_output_streams; i++) {
4368 ost = output_streams[i];
4369 if (ost->encoding_needed) {
4370 av_freep(&ost->enc_ctx->stats_in);
4372 total_packets_written += ost->packets_written;
/* -abort_on empty_output: treat a run that muxed nothing as fatal. */
4375 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4376 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4380 /* close each decoder */
4381 for (i = 0; i < nb_input_streams; i++) {
4382 ist = input_streams[i];
4383 if (ist->decoding_needed) {
4384 avcodec_close(ist->dec_ctx);
4385 if (ist->hwaccel_uninit)
4386 ist->hwaccel_uninit(ist->dec_ctx);
4390 av_buffer_unref(&hw_device_ctx);
/* Cleanup path (also reached on error via elided labels/gotos). */
4397 free_input_threads();
4400 if (output_streams) {
4401 for (i = 0; i < nb_output_streams; i++) {
4402 ost = output_streams[i];
4405 if (fclose(ost->logfile))
4406 av_log(NULL, AV_LOG_ERROR,
4407 "Error closing logfile, loss of information possible: %s\n",
4408 av_err2str(AVERROR(errno)));
4409 ost->logfile = NULL;
4411 av_freep(&ost->forced_kf_pts);
4412 av_freep(&ost->apad);
4413 av_freep(&ost->disposition);
4414 av_dict_free(&ost->encoder_opts);
4415 av_dict_free(&ost->sws_dict);
4416 av_dict_free(&ost->swr_opts);
4417 av_dict_free(&ost->resample_opts);
/* CPU user time consumed by this process, in microseconds.
 * Platform selection: getrusage (POSIX), GetProcessTimes (Windows;
 * FILETIME is in 100ns units, hence the /10), else wall clock fallback. */
4425 static int64_t getutime(void)
4428 struct rusage rusage;
4430 getrusage(RUSAGE_SELF, &rusage);
4431 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4432 #elif HAVE_GETPROCESSTIMES
4434 FILETIME c, e, k, u;
4435 proc = GetCurrentProcess();
4436 GetProcessTimes(proc, &c, &e, &k, &u);
4437 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4439 return av_gettime_relative();
/* Peak memory usage of this process in bytes, for the -benchmark report.
 * POSIX ru_maxrss is in kilobytes, hence the *1024; on Windows the
 * PeakPagefileUsage counter is used. The fallback branch is elided. */
4443 static int64_t getmaxrss(void)
4445 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4446 struct rusage rusage;
4447 getrusage(RUSAGE_SELF, &rusage);
4448 return (int64_t)rusage.ru_maxrss * 1024;
4449 #elif HAVE_GETPROCESSMEMORYINFO
4451 PROCESS_MEMORY_COUNTERS memcounters;
4452 proc = GetCurrentProcess();
4453 memcounters.cb = sizeof(memcounters);
4454 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4455 return memcounters.PeakPagefileUsage;
/* No-op av_log callback, installed in main() when running with -d (daemon
 * mode) to suppress all log output; body not visible in this excerpt but
 * the signature matches the av_log callback contract. */
4461 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register cleanup, parse options, validate the
 * input/output file counts, run transcode(), then report benchmarks and
 * exit through exit_program(). */
4465 int main(int argc, char **argv)
4472 register_exit(ffmpeg_cleanup);
4474 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4476 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4477 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode — silence logging entirely. */
4479 if(argc>1 && !strcmp(argv[1], "-d")){
4481 av_log_set_callback(log_callback_null);
/* Register all codecs/devices/filters and init networking before any
 * option that may open files or URLs. */
4486 avcodec_register_all();
4488 avdevice_register_all();
4490 avfilter_register_all();
4492 avformat_network_init();
4494 show_banner(argc, argv, options);
4496 /* parse options and open all input/output files */
4497 ret = ffmpeg_parse_options(argc, argv);
4501 if (nb_output_files <= 0 && nb_input_files == 0) {
4503 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4507 /* file converter / grab */
4508 if (nb_output_files <= 0) {
4509 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4513 // if (nb_input_files == 0) {
4514 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole conversion in CPU user time for "bench:" output. */
4518 current_time = ti = getutime();
4519 if (transcode() < 0)
4521 ti = getutime() - ti;
4523 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4525 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4526 decode_error_stat[0], decode_error_stat[1]);
/* Fail (elided exit) when the decode error ratio exceeds -max_error_rate. */
4527 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by the user (Ctrl-C etc.). */
4530 exit_program(received_nb_signals ? 255 : main_return_code);
4531 return main_return_code;