2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Opened lazily by do_video_stats() when -vstats is requested. */
112 static FILE *vstats_file;
/* Variable names usable inside a -force_key_frames expression.
 * NOTE(review): initializer list truncated in this extract. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in the file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Frame-duplication / drop bookkeeping shared by do_video_out() and the
 * progress report. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
/* Scratch buffer for avcodec_encode_subtitle(), allocated on first use. */
138 static uint8_t *subtitle_out;
/* Global stream/file tables; sized by the matching nb_* counters and
 * owned by this file (freed in ffmpeg_cleanup()). */
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
/* Saved tty state restored by term_exit_sigsafe() when restore_tty is set. */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a fully transparent RGB32 canvas sized from
 * the decoder (falling back to the configured sub2video w/h) so subtitle
 * rectangles can be blitted onto it.  Returns <0 on allocation failure
 * (error path partially elided in this extract). */
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
/* Prefer the decoder's geometry; sub2video.w/h is the fallback when the
 * decoder has not reported dimensions yet. */
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer, as required by libavfilter/SIMD. */
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero fill = transparent black in pre-multiplied RGB32. */
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas `dst`.
 * Non-bitmap rectangles and rectangles that overflow the w x h canvas are
 * warned about and skipped (skip/return lines elided in this extract). */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
202 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the AVSubtitleRect palette (one uint32_t per index). */
204 pal = (uint32_t *)r->data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
/* Palette lookup per source byte (src2 setup elided in this extract). */
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->linesize[0];
/* Push the current sub2video canvas (stamped with `pts`) into every filter
 * fed by this input stream.  KEEP_REF lets the same frame be re-sent by the
 * heartbeat; PUSH forces immediate filtering. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a blank canvas and push it into the filters.
 * A NULL `sub` (used by heartbeat/flush) pushes an empty canvas instead;
 * that branch's lines are partially elided in this extract. */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* Convert display window from AV_TIME_BASE microseconds (offset by the
 * subtitle's start/end display times, given in ms) to stream time base. */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
/* NULL-sub path: reuse the previous end pts as the new frame's pts. */
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* Keep sub2video filter inputs fed while other streams of the same file
 * advance, so filters (e.g. overlay) are never starved waiting for a
 * subtitle frame. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that participate in the sub2video hack have a frame. */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Current subtitle expired (or canvas never drawn): push an empty one. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
/* Re-push the existing canvas only if some filter actually asked for input
 * (condition line elided in this extract). */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for sub2video: clear any still-displayed subtitle,
 * then signal EOF to each connected buffer source with a NULL frame. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put the tty back into the state
 * saved at startup.  Called from signal handlers, so it must stay limited
 * to async-signal-safe calls (guard on restore_tty elided in this extract). */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
/* NOTE(review): AV_LOG_QUIET with an empty string appears to be a flush/
 * reset of the log state rather than real output — confirm against the
 * full source. */
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state, read from both the main loop and handlers.
 * NOTE(review): plain volatile ints, not C11 atomics — matches the era of
 * this code base; do not assume atomicity guarantees beyond it. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* Records which signal arrived; after >3 signals the user clearly wants
 * out, so print to stderr with write() (async-signal-safe) and hard-exit
 * (exit call elided in this extract). */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
/* Windows console control handler: maps console events onto the POSIX-style
 * sigterm_handler() so shutdown takes a single code path on all platforms. */
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT (CTRL_C_EVENT case elided). */
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init() (definition line elided in this extract): switch
 * the controlling tty to a raw-ish mode for single-key interaction, then
 * install signal handlers. */
372 if (!run_as_daemon && stdin_interaction) {
/* Snapshot current settings first so term_exit_sigsafe() can restore them
 * (save into oldtty elided in this extract). */
374 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keypresses arrive unmodified. */
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379 |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded also triggers a graceful shutdown. */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
/* Returns the pending key if any; platform-specific probing below (POSIX
 * select() on stdin, PeekNamedPipe/console on Windows).  Large parts of
 * both branches are elided in this extract. */
404 static int read_key(void)
/* Zero-timeout select: poll fd 0 without blocking. */
416 n = select(1, &rfds, NULL, NULL, &tv);
425 # if HAVE_PEEKNAMEDPIPE
427 static HANDLE input_handle;
430 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode failing means stdin is a pipe, not a real console. */
431 is_pipe = !GetConsoleMode(input_handle, &dw);
435 /* When running under a GUI, you will end here. */
436 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * During init one signal is tolerated (transcode_init_done is 0), which is
 * why this compares the counters instead of testing received_nb_signals. */
455 static int decode_interrupt_cb(void *ctx)
457 return received_nb_signals > transcode_init_done;
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down every global resource this file owns: filtergraphs, output
 * files/streams, input threads/files/streams, the vstats file, and the
 * network layer.  `ret` is the pending exit status (used for the final
 * diagnostics).  Several guard/exit lines are elided in this extract. */
462 static void ffmpeg_cleanup(int ret)
467 int maxrss = getmaxrss() / 1024;
468 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* -- filtergraphs: free graph, then per-input/per-output metadata -- */
471 for (i = 0; i < nb_filtergraphs; i++) {
472 FilterGraph *fg = filtergraphs[i];
473 avfilter_graph_free(&fg->graph);
474 for (j = 0; j < fg->nb_inputs; j++) {
475 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476 av_freep(&fg->inputs[j]->name);
477 av_freep(&fg->inputs[j]);
479 av_freep(&fg->inputs);
480 for (j = 0; j < fg->nb_outputs; j++) {
481 av_freep(&fg->outputs[j]->name);
482 av_freep(&fg->outputs[j]->formats);
483 av_freep(&fg->outputs[j]->channel_layouts);
484 av_freep(&fg->outputs[j]->sample_rates);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
/* -- output files: close pb for file-backed muxers, free contexts -- */
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
/* -- output streams: bsf chain, cached frames, parsers, encoder ctx -- */
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
516 for (j = 0; j < ost->nb_bitstream_filters; j++)
517 av_bsf_free(&ost->bsf_ctx[j]);
518 av_freep(&ost->bsf_ctx);
519 av_freep(&ost->bsf_extradata_updated);
521 av_frame_free(&ost->filtered_frame);
522 av_frame_free(&ost->last_frame);
523 av_dict_free(&ost->encoder_opts);
525 av_parser_close(ost->parser);
526 avcodec_free_context(&ost->parser_avctx);
528 av_freep(&ost->forced_keyframes);
529 av_expr_free(ost->forced_keyframes_pexpr);
530 av_freep(&ost->avfilter);
531 av_freep(&ost->logfile_prefix);
533 av_freep(&ost->audio_channels_map);
534 ost->audio_channels_mapped = 0;
536 av_dict_free(&ost->sws_dict);
538 avcodec_free_context(&ost->enc_ctx);
539 avcodec_parameters_free(&ost->ref_par);
/* Drain and unref any packets still queued before the muxer was ready. */
541 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
543 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544 av_packet_unref(&pkt);
546 av_fifo_freep(&ost->muxing_queue);
548 av_freep(&output_streams[i]);
/* Input threads must stop before their files/streams are freed. */
551 free_input_threads();
553 for (i = 0; i < nb_input_files; i++) {
554 avformat_close_input(&input_files[i]->ctx);
555 av_freep(&input_files[i]);
557 for (i = 0; i < nb_input_streams; i++) {
558 InputStream *ist = input_streams[i];
560 av_frame_free(&ist->decoded_frame);
561 av_frame_free(&ist->filter_frame);
562 av_dict_free(&ist->decoder_opts);
563 avsubtitle_free(&ist->prev_sub.subtitle);
564 av_frame_free(&ist->sub2video.frame);
565 av_freep(&ist->filters);
566 av_freep(&ist->hwaccel_device);
567 av_freep(&ist->dts_buffer);
569 avcodec_free_context(&ist->dec_ctx);
571 av_freep(&input_streams[i]);
/* fclose can fail on deferred writes; report rather than silently drop. */
575 if (fclose(vstats_file))
576 av_log(NULL, AV_LOG_ERROR,
577 "Error closing vstats file, loss of information possible: %s\n",
578 av_err2str(AVERROR(errno)));
580 av_freep(&vstats_filename);
582 av_freep(&input_streams);
583 av_freep(&input_files);
584 av_freep(&output_streams);
585 av_freep(&output_files);
589 avformat_network_deinit();
591 if (received_sigterm) {
592 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593 (int) received_sigterm);
594 } else if (ret && transcode_init_done) {
595 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-sensitive
 * match).  Used to strip already-consumed options before assert_avoptions()
 * checks for leftovers. */
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates every entry of b. */
605 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * muxer/codec (exit call elided in this extract). */
610 void assert_avoptions(AVDictionary *m)
612 AVDictionaryEntry *t;
613 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that a codec is experimental and how to enable it; body elided in
 * this extract. */
619 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time elapsed since the previous call,
 * labelled by the printf-style `fmt` arguments; a NULL fmt just resets the
 * reference timestamp (that branch is elided in this extract). */
624 static void update_benchmark(const char *fmt, ...)
626 if (do_benchmark_all) {
627 int64_t t = getutime();
633 vsnprintf(buf, sizeof(buf), fmt, va);
635 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark `ost` finished with `this_stream` and every other output stream with
 * `others` — used when a fatal mux error must stop the whole pipeline. */
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
644 for (i = 0; i < nb_output_streams; i++) {
645 OutputStream *ost2 = output_streams[i];
646 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded/copied packet to the muxer for `ost`.  Responsibilities
 * visible here: buffer packets until the muxer header is written, sanitize
 * timestamps (VSYNC_DROP, DTS/PTS ordering, monotonicity), collect quality
 * side data, and account stream statistics.  Consumes (unrefs) `pkt`.
 * Several closing braces/early exits are elided in this extract. */
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
652 AVFormatContext *s = of->ctx;
653 AVStream *st = ost->st;
656 if (!of->header_written) {
658 /* the muxer is not initialized yet, buffer the packet */
659 if (!av_fifo_space(ost->muxing_queue)) {
/* Grow geometrically but never beyond max_muxing_queue_size. */
660 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661 ost->max_muxing_queue_size);
662 if (new_size <= av_fifo_size(ost->muxing_queue)) {
663 av_log(NULL, AV_LOG_ERROR,
664 "Too many packets buffered for output stream %d:%d.\n",
665 ost->file_index, ost->st->index);
668 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Move ownership into the queue; pkt is left blank. */
672 av_packet_move_ref(&tmp_pkt, pkt);
673 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* VSYNC_DROP (or negative audio sync) discards timestamps entirely. */
677 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
678 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
679 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
682 * Audio encoders may split the packets -- #frames in != #packets out.
683 * But there is no reordering, so we can limit the number of output packets
684 * by simply dropping them here.
685 * Counting encoded video frames needs to be done separately because of
686 * reordering, see do_video_out()
688 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689 if (ost->frame_number >= ost->max_frames) {
690 av_packet_unref(pkt);
/* Pull encoder quality/error stats out of packet side data for -vstats. */
695 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
697 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
699 ost->quality = sd ? AV_RL32(sd) : -1;
700 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
702 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
704 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: duration must come from the frame rate, not the packet. */
709 if (ost->frame_rate.num && ost->is_cfr) {
710 if (pkt->duration > 0)
711 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
717 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
718 if (pkt->dts != AV_NOPTS_VALUE &&
719 pkt->pts != AV_NOPTS_VALUE &&
720 pkt->dts > pkt->pts) {
721 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
723 ost->file_index, ost->st->index);
/* Median-of-three trick: picks the middle of pts, dts, last_mux_dts+1,
 * giving a dts that is both <= pts and >= previous dts + 1. */
725 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
729 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
730 pkt->dts != AV_NOPTS_VALUE &&
731 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Non-strict muxers tolerate equal dts; strict ones require increase. */
733 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734 if (pkt->dts < max) {
735 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736 av_log(s, loglevel, "Non-monotonous DTS in output stream "
737 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
740 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
743 av_log(s, loglevel, "changing to %"PRId64". This may result "
744 "in incorrect timestamps in the output file.\n",
746 if (pkt->pts >= pkt->dts)
747 pkt->pts = FFMAX(pkt->pts, max);
752 ost->last_mux_dts = pkt->dts;
754 ost->data_size += pkt->size;
755 ost->packets_written++;
757 pkt->stream_index = ost->index;
/* -debug_ts style trace of what goes into the muxer. */
760 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762 av_get_media_type_string(ost->enc_ctx->codec_type),
763 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
769 ret = av_interleaved_write_frame(s, pkt);
/* A write failure poisons the whole output: finish every stream. */
771 print_error("av_interleaved_write_frame()", ret);
772 main_return_code = 1;
773 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
775 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and clamp the owning file's
 * recording time to the stream's current end, so other streams of the same
 * file stop at a matching point. */
778 static void close_output_stream(OutputStream *ost)
780 OutputFile *of = output_files[ost->file_index];
782 ost->finished |= ENCODER_FINISHED;
784 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785 of->recording_time = FFMIN(of->recording_time, end);
/* Run `pkt` through the stream's bitstream-filter chain (if any) and hand
 * each resulting packet to write_packet().  With no filters the packet goes
 * straight to the muxer.  Loop scaffolding (idx management, EOF handling)
 * is partially elided in this extract. */
789 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
793 /* apply the output bitstream filters, if any */
794 if (ost->nb_bitstream_filters) {
797 av_packet_split_side_data(pkt);
798 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
804 /* get a packet from the previous filter up the chain */
805 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806 if (ret == AVERROR(EAGAIN)) {
812 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813 * the api states this shouldn't happen after init(). Propagate it here to the
814 * muxer and to the next filters in the chain to workaround this.
815 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816 * par_out->extradata and adapt muxers accordingly to get rid of this. */
817 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 0 = extradata already propagated to the muxer for this bsf. */
821 ost->bsf_extradata_updated[idx - 1] |= 1;
824 /* send it to the next filter down the chain or to the muxer */
825 if (idx < ost->nb_bitstream_filters) {
826 /* HACK/FIXME! - See above */
827 if (!(ost->bsf_extradata_updated[idx] & 2)) {
828 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
/* Bit 1 = extradata already propagated to the next bsf in the chain. */
831 ost->bsf_extradata_updated[idx] |= 2;
833 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
838 write_packet(of, pkt, ost);
/* No bitstream filters configured: direct path to the muxer. */
841 write_packet(of, pkt, ost);
844 if (ret < 0 && ret != AVERROR_EOF) {
845 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether `ost` may still emit frames under the file's -t limit;
 * closes the stream and (per the visible logic) stops output once the
 * stream position reaches recording_time.  Return statements elided in
 * this extract. */
852 static int check_recording_time(OutputStream *ost)
854 OutputFile *of = output_files[ost->file_index];
856 if (of->recording_time != INT64_MAX &&
857 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
858 AV_TIME_BASE_Q) >= 0) {
859 close_output_stream(ost);
/* Encode one audio frame via the send/receive API and push every produced
 * packet through output_packet().  Maintains sync_opts in samples and the
 * encoded-frame statistics.  Drain-loop scaffolding partially elided in
 * this extract. */
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
868 AVCodecContext *enc = ost->enc_ctx;
872 av_init_packet(&pkt);
876 if (!check_recording_time(ost))
/* Trust the filter pts unless absent or audio sync is disabled. */
879 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880 frame->pts = ost->sync_opts;
/* Next expected pts = current pts plus this frame's sample count. */
881 ost->sync_opts = frame->pts + frame->nb_samples;
882 ost->samples_encoded += frame->nb_samples;
883 ost->frames_encoded++;
885 av_assert0(pkt.size || !pkt.data);
886 update_benchmark(NULL);
888 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891 enc->time_base.num, enc->time_base.den);
894 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder has ready. */
899 ret = avcodec_receive_packet(enc, &pkt);
900 if (ret == AVERROR(EAGAIN))
905 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
907 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
910 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
916 output_packet(of, &pkt, ost);
921 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
 * twice (draw + clear packets); timestamps are shifted to honour -ss/-t.
 * Parameter list and several guard lines elided in this extract. */
925 static void do_subtitle_out(OutputFile *of,
929 int subtitle_out_max_size = 1024 * 1024;
930 int subtitle_out_size, nb, i;
935 if (sub->pts == AV_NOPTS_VALUE) {
936 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
945 subtitle_out = av_malloc(subtitle_out_max_size);
947 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
952 /* Note: DVB subtitle need one packet to draw them and one other
953 packet to clear them */
954 /* XXX: signal it in the codec context ? */
955 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
960 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
962 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963 pts -= output_files[ost->file_index]->start_time;
964 for (i = 0; i < nb; i++) {
/* num_rects is temporarily zeroed for the DVB "clear" pass (that line is
 * elided in this extract); save it so it can be restored after encoding. */
965 unsigned save_num_rects = sub->num_rects;
967 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968 if (!check_recording_time(ost))
972 // start_display_time is required to be 0
973 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
974 sub->end_display_time -= sub->start_display_time;
975 sub->start_display_time = 0;
979 ost->frames_encoded++;
981 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982 subtitle_out_max_size, sub);
984 sub->num_rects = save_num_rects;
985 if (subtitle_out_size < 0) {
986 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
990 av_init_packet(&pkt);
991 pkt.data = subtitle_out;
992 pkt.size = subtitle_out_size;
993 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996 /* XXX: the pts correction is handled here. Maybe handling
997 it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond of display time. */
999 pkt.pts += 90 * sub->start_display_time;
1001 pkt.pts += 90 * sub->end_display_time;
1004 output_packet(of, &pkt, ost);
/* Encode one (possibly duplicated or dropped) video frame.  Core of the
 * video sync engine: computes how many times the current/previous frame
 * must be emitted for the selected vsync mode (CFR/VFR/VSCFR/passthrough/
 * drop), maintains sync_opts, applies forced keyframes, then drives the
 * send/receive encode loop.  Many scaffolding lines (braces, case labels,
 * early exits) are elided in this extract. */
1008 static void do_video_out(OutputFile *of,
1010 AVFrame *next_picture,
1013 int ret, format_video_sync;
1015 AVCodecContext *enc = ost->enc_ctx;
1016 AVCodecParameters *mux_par = ost->st->codecpar;
1017 int nb_frames, nb0_frames, i;
1018 double delta, delta0;
1019 double duration = 0;
1021 InputStream *ist = NULL;
1022 AVFilterContext *filter = ost->filter->filter;
1024 if (ost->source_index >= 0)
1025 ist = input_streams[ost->source_index];
/* Estimate the frame duration, in encoder time-base units, from the best
 * available source: filter frame rate, then -r, then the packet duration. */
1027 if (filter->inputs[0]->frame_rate.num > 0 &&
1028 filter->inputs[0]->frame_rate.den > 0)
1029 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1031 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1032 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1034 if (!ost->filters_script &&
1038 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1039 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* EOF flush: predict the repetition count from recent history. */
1042 if (!next_picture) {
1044 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1045 ost->last_nb0_frames[1],
1046 ost->last_nb0_frames[2]);
1048 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1049 delta = delta0 + duration;
1051 /* by default, we output a single frame */
1052 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1055 format_video_sync = video_sync_method;
1056 if (format_video_sync == VSYNC_AUTO) {
1057 if(!strcmp(of->ctx->oformat->name, "avi")) {
1058 format_video_sync = VSYNC_VFR;
1060 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
/* Single-stream, zero-offset input can use the start-time-compensated
 * CFR variant (condition start elided in this extract). */
1062 && format_video_sync == VSYNC_CFR
1063 && input_files[ist->file_index]->ctx->nb_streams == 1
1064 && input_files[ist->file_index]->input_ts_offset == 0) {
1065 format_video_sync = VSYNC_VSCFR;
1067 if (format_video_sync == VSYNC_CFR && copy_ts) {
1068 format_video_sync = VSYNC_VSCFR;
1071 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift beyond tolerance: clip the input timestamp forward. */
1075 format_video_sync != VSYNC_PASSTHROUGH &&
1076 format_video_sync != VSYNC_DROP) {
1077 if (delta0 < -0.6) {
1078 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1080 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1081 sync_ipts = ost->sync_opts;
1086 switch (format_video_sync) {
1088 if (ost->frame_number == 0 && delta0 >= 0.5) {
1089 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1092 ost->sync_opts = lrint(sync_ipts);
1095 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR decision: drop when under threshold, drop when far behind, duplicate
 * when far ahead (drop/dup assignments partially elided in this extract). */
1096 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1098 } else if (delta < -1.1)
1100 else if (delta > 1.1) {
1101 nb_frames = lrintf(delta);
1103 nb0_frames = lrintf(delta0 - 0.6);
1109 else if (delta > 0.6)
1110 ost->sync_opts = lrint(sync_ipts);
1113 case VSYNC_PASSTHROUGH:
1114 ost->sync_opts = lrint(sync_ipts);
/* Never exceed the -frames limit; nb0 can't exceed total emissions. */
1121 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1122 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the duplication history window. */
1124 memmove(ost->last_nb0_frames + 1,
1125 ost->last_nb0_frames,
1126 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1127 ost->last_nb0_frames[0] = nb0_frames;
1129 if (nb0_frames == 0 && ost->last_dropped) {
1131 av_log(NULL, AV_LOG_VERBOSE,
1132 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1133 ost->frame_number, ost->st->index, ost->last_frame->pts);
1135 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1136 if (nb_frames > dts_error_threshold * 30) {
1137 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1141 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1142 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1143 if (nb_frames_dup > dup_warning) {
1144 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1148 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1150 /* duplicates frame if needed */
1151 for (i = 0; i < nb_frames; i++) {
1152 AVFrame *in_picture;
1153 av_init_packet(&pkt);
/* First nb0_frames iterations re-encode the previous frame. */
1157 if (i < nb0_frames && ost->last_frame) {
1158 in_picture = ost->last_frame;
1160 in_picture = next_picture;
1165 in_picture->pts = ost->sync_opts;
1168 if (!check_recording_time(ost))
1170 if (ost->frame_number >= ost->max_frames)
1174 #if FF_API_LAVF_FMT_RAWPICTURE
/* Legacy rawvideo fast path: pass the AVPicture pointer as packet data. */
1175 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1176 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1177 /* raw pictures are written as AVPicture structure to
1178 avoid any copies. We support temporarily the older
1180 if (in_picture->interlaced_frame)
1181 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1183 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1184 pkt.data = (uint8_t *)in_picture;
1185 pkt.size = sizeof(AVPicture);
1186 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1187 pkt.flags |= AV_PKT_FLAG_KEY;
1189 output_packet(of, &pkt, ost);
1193 int forced_keyframe = 0;
1196 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1197 ost->top_field_first >= 0)
1198 in_picture->top_field_first = !!ost->top_field_first;
1200 if (in_picture->interlaced_frame) {
1201 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1202 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1204 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208 in_picture->quality = enc->global_quality;
1209 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, -force_key_frames expression, or
 * "source" (mirror the input's keyframes). */
1211 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1212 in_picture->pts * av_q2d(enc->time_base) : NAN;
1213 if (ost->forced_kf_index < ost->forced_kf_count &&
1214 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1215 ost->forced_kf_index++;
1216 forced_keyframe = 1;
1217 } else if (ost->forced_keyframes_pexpr) {
1219 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1220 res = av_expr_eval(ost->forced_keyframes_pexpr,
1221 ost->forced_keyframes_expr_const_values, NULL);
1222 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1223 ost->forced_keyframes_expr_const_values[FKF_N],
1224 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1225 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1226 ost->forced_keyframes_expr_const_values[FKF_T],
1227 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1230 forced_keyframe = 1;
1231 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1232 ost->forced_keyframes_expr_const_values[FKF_N];
1233 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1234 ost->forced_keyframes_expr_const_values[FKF_T];
1235 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1238 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1239 } else if ( ost->forced_keyframes
1240 && !strncmp(ost->forced_keyframes, "source", 6)
1241 && in_picture->key_frame==1) {
1242 forced_keyframe = 1;
1245 if (forced_keyframe) {
1246 in_picture->pict_type = AV_PICTURE_TYPE_I;
1247 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1250 update_benchmark(NULL);
1252 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1253 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1254 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1255 enc->time_base.num, enc->time_base.den);
1258 ost->frames_encoded++;
1260 ret = avcodec_send_frame(enc, in_picture);
/* Drain every packet the encoder has ready for this frame. */
1265 ret = avcodec_receive_packet(enc, &pkt);
1266 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1267 if (ret == AVERROR(EAGAIN))
1273 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1274 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1275 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1276 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Non-delaying encoders without pts get the synthesized sync_opts. */
1279 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1280 pkt.pts = ost->sync_opts;
1282 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1285 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1286 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1287 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1288 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1291 frame_size = pkt.size;
1292 output_packet(of, &pkt, ost);
1294 /* if two pass, output log */
1295 if (ost->logfile && enc->stats_out) {
1296 fprintf(ost->logfile, "%s", enc->stats_out);
1302 * For video, number of frames in == number of packets out.
1303 * But there may be reordering, so we can't throw away frames on encoder
1304 * flush, we need to limit them here, before they go into encoder.
1306 ost->frame_number++;
1308 if (vstats_filename && frame_size)
1309 do_video_stats(ost, frame_size);
/* Keep a reference to this frame for possible duplication next call. */
1312 if (!ost->last_frame)
1313 ost->last_frame = av_frame_alloc();
1314 av_frame_unref(ost->last_frame);
1315 if (next_picture && ost->last_frame)
1316 av_frame_ref(ost->last_frame, next_picture);
1318 av_frame_free(&ost->last_frame);
1322 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1326 static double psnr(double d)
1328 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (-vstats/-vstats_file) to the
 * global vstats_file: frame number, quantizer, optional PSNR, frame size,
 * stream size, time, instantaneous and average bitrate, and picture type.
 * NOTE(review): this excerpt has gaps (missing lines in the embedded
 * numbering, e.g. the opening brace and the vstats_file error check) —
 * confirm control flow against upstream ffmpeg.c. */
1331 static void do_video_stats(OutputStream *ost, int frame_size)
1333 AVCodecContext *enc;
1335 double ti1, bitrate, avg_bitrate;
1337 /* this is executed just the first time do_video_stats is called */
/* Lazily open the stats file on first use; kept open for the whole run. */
1339 vstats_file = fopen(vstats_filename, "w");
1347 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1348 frame_number = ost->st->nb_frames;
1349 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1350 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR is only meaningful when the encoder was asked to collect error stats. */
1352 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1353 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1355 fprintf(vstats_file,"f_size= %6d ", frame_size);
1356 /* compute pts value */
1357 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes / elapsed time. */
1361 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1362 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1363 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1364 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1365 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark an output stream as fully finished (both encoder and muxer side).
 * The loop marks every stream of the owning output file as finished as well;
 * NOTE(review): lines are missing from this excerpt between the assignment
 * and the loop (likely a guard such as a -shortest check) — confirm against
 * upstream ffmpeg.c. */
1369 static void finish_output_stream(OutputStream *ost)
1371 OutputFile *of = output_files[ost->file_index];
1374 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1377 for (i = 0; i < of->ctx->nb_streams; i++)
1378 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1383 * Get and encode new output from any of the filtergraphs, without causing
1386 * @return 0 for success, <0 for severe errors
/* Drain every output stream's buffersink and feed the frames to the video or
 * audio encoder. With flush set, an EOF from a video sink still triggers a
 * final do_video_out(NULL) so the encoder can be flushed.
 * NOTE(review): this excerpt has gaps (missing braces/continues per the
 * embedded numbering) — confirm control flow against upstream ffmpeg.c. */
1388 static int reap_filters(int flush)
1390 AVFrame *filtered_frame = NULL;
1393 /* Reap all buffers present in the buffer sinks */
1394 for (i = 0; i < nb_output_streams; i++) {
1395 OutputStream *ost = output_streams[i];
1396 OutputFile *of = output_files[ost->file_index];
1397 AVFilterContext *filter;
1398 AVCodecContext *enc = ost->enc_ctx;
1403 filter = ost->filter->filter;
/* Lazily allocate the reusable frame that receives filter output. */
1405 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1406 return AVERROR(ENOMEM);
1408 filtered_frame = ost->filtered_frame;
1411 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered, never pull new input. */
1412 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1413 AV_BUFFERSINK_FLAG_NO_REQUEST);
1415 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1416 av_log(NULL, AV_LOG_WARNING,
1417 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1418 } else if (flush && ret == AVERROR_EOF) {
1419 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1420 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1424 if (ost->finished) {
1425 av_frame_unref(filtered_frame);
1428 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1429 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1430 AVRational tb = enc->time_base;
/* Widen the time base by up to 16 extra bits so float_pts keeps sub-tick
 * precision after the rescale. */
1431 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1433 tb.den <<= extra_bits;
1435 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1436 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1437 float_pts /= 1 << extra_bits;
1438 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1439 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1441 filtered_frame->pts =
1442 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1443 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1445 //if (ost->source_index >= 0)
1446 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1448 switch (filter->inputs[0]->type) {
1449 case AVMEDIA_TYPE_VIDEO:
/* Propagate SAR from the filter unless the user forced an aspect ratio. */
1450 if (!ost->frame_aspect_ratio.num)
1451 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1454 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1455 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1457 enc->time_base.num, enc->time_base.den);
1460 do_video_out(of, ost, filtered_frame, float_pts);
1462 case AVMEDIA_TYPE_AUDIO:
1463 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1464 enc->channels != av_frame_get_channels(filtered_frame)) {
1465 av_log(NULL, AV_LOG_ERROR,
1466 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1469 do_audio_out(of, ost, filtered_frame);
1472 // TODO support subtitle filters
1476 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte counts, muxing overhead
 * percentage, and (at verbose level) per-stream demux/decode and encode/mux
 * statistics for every input and output file. Warns when nothing was encoded.
 * NOTE(review): this excerpt has gaps (missing braces / else branches per the
 * embedded numbering) — confirm against upstream ffmpeg.c. */
1483 static void print_final_stats(int64_t total_size)
1485 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1486 uint64_t subtitle_size = 0;
1487 uint64_t data_size = 0;
/* percent stays -1.0 (printed as "unknown") when overhead can't be computed. */
1488 float percent = -1.0;
1492 for (i = 0; i < nb_output_streams; i++) {
1493 OutputStream *ost = output_streams[i];
1494 switch (ost->enc_ctx->codec_type) {
1495 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1496 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1497 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1498 default: other_size += ost->data_size; break;
1500 extra_size += ost->enc_ctx->extradata_size;
1501 data_size += ost->data_size;
/* First-pass-only streams produce no muxed output worth counting. */
1502 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1503 != AV_CODEC_FLAG_PASS1)
1507 if (data_size && total_size>0 && total_size >= data_size)
1508 percent = 100.0 * (total_size - data_size) / data_size;
1510 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1511 video_size / 1024.0,
1512 audio_size / 1024.0,
1513 subtitle_size / 1024.0,
1514 other_size / 1024.0,
1515 extra_size / 1024.0);
1517 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1519 av_log(NULL, AV_LOG_INFO, "unknown");
1520 av_log(NULL, AV_LOG_INFO, "\n");
1522 /* print verbose per-stream stats */
1523 for (i = 0; i < nb_input_files; i++) {
1524 InputFile *f = input_files[i];
/* Per-file totals; note this local total_size shadows the parameter. */
1525 uint64_t total_packets = 0, total_size = 0;
1527 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1528 i, f->ctx->filename);
1530 for (j = 0; j < f->nb_streams; j++) {
1531 InputStream *ist = input_streams[f->ist_index + j];
1532 enum AVMediaType type = ist->dec_ctx->codec_type;
1534 total_size += ist->data_size;
1535 total_packets += ist->nb_packets;
1537 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1538 i, j, media_type_string(type));
1539 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1540 ist->nb_packets, ist->data_size);
1542 if (ist->decoding_needed) {
1543 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1544 ist->frames_decoded);
1545 if (type == AVMEDIA_TYPE_AUDIO)
1546 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1547 av_log(NULL, AV_LOG_VERBOSE, "; ");
1550 av_log(NULL, AV_LOG_VERBOSE, "\n");
1553 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1554 total_packets, total_size);
/* Mirror of the input loop, but counting encoded frames and muxed packets. */
1557 for (i = 0; i < nb_output_files; i++) {
1558 OutputFile *of = output_files[i];
1559 uint64_t total_packets = 0, total_size = 0;
1561 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1562 i, of->ctx->filename);
1564 for (j = 0; j < of->ctx->nb_streams; j++) {
1565 OutputStream *ost = output_streams[of->ost_index + j];
1566 enum AVMediaType type = ost->enc_ctx->codec_type;
1568 total_size += ost->data_size;
1569 total_packets += ost->packets_written;
1571 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1572 i, j, media_type_string(type));
1573 if (ost->encoding_needed) {
1574 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1575 ost->frames_encoded);
1576 if (type == AVMEDIA_TYPE_AUDIO)
1577 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1578 av_log(NULL, AV_LOG_VERBOSE, "; ");
1581 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1582 ost->packets_written, ost->data_size);
1584 av_log(NULL, AV_LOG_VERBOSE, "\n");
1587 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1588 total_packets, total_size);
1590 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1591 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1593 av_log(NULL, AV_LOG_WARNING, "\n");
1595 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic status line ("frame= ... fps= ... bitrate= ... speed=")
 * to stderr/log and, when -progress is active, a machine-readable key=value
 * report to progress_avio. Rate-limited to one update per 500ms except for
 * the final report, which also triggers print_final_stats().
 * NOTE(review): this excerpt has gaps (missing returns/braces/else branches
 * per the embedded numbering) — confirm against upstream ffmpeg.c. */
1600 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1603 AVBPrint buf_script;
1605 AVFormatContext *oc;
1607 AVCodecContext *enc;
1608 int frame_number, vid, i;
/* INT64_MIN + 1 so FFABS(pts) below cannot overflow on the sentinel. */
1611 int64_t pts = INT64_MIN + 1;
1612 static int64_t last_time = -1;
1613 static int qp_histogram[52];
1614 int hours, mins, secs, us;
1618 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle: skip intermediate reports arriving within 500ms of the last. */
1621 if (!is_last_report) {
1622 if (last_time == -1) {
1623 last_time = cur_time;
1626 if ((cur_time - last_time) < 500000)
1628 last_time = cur_time;
1631 t = (cur_time-timer_start) / 1000000.0;
1634 oc = output_files[0]->ctx;
1636 total_size = avio_size(oc->pb);
1637 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1638 total_size = avio_tell(oc->pb);
1642 av_bprint_init(&buf_script, 0, 1);
1643 for (i = 0; i < nb_output_streams; i++) {
1645 ost = output_streams[i];
1647 if (!ost->stream_copy)
1648 q = ost->quality / (float) FF_QP2LAMBDA;
/* vid tracks whether a video stream was already reported this round. */
1650 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1652 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1653 ost->file_index, ost->index, q);
1655 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658 frame_number = ost->frame_number;
1659 fps = t > 1 ? frame_number / t : 0;
1660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1661 frame_number, fps < 9.95, fps, q);
1662 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1663 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1664 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1665 ost->file_index, ost->index, q);
1667 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: accumulate a quantizer histogram and print it as hex digits. */
1671 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1673 for (j = 0; j < 32; j++)
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1677 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1679 double error, error_sum = 0;
1680 double scale, scale_sum = 0;
1682 char type[3] = { 'Y','U','V' };
1683 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1684 for (j = 0; j < 3; j++) {
/* Final report uses the encoder's cumulative error; otherwise per-frame. */
1685 if (is_last_report) {
1686 error = enc->error[j];
1687 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1689 error = ost->error[j];
1690 scale = enc->width * enc->height * 255.0 * 255.0;
1696 p = psnr(error / scale);
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1698 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1699 ost->file_index, ost->index, type[j] | 32, p);
1701 p = psnr(error_sum / scale_sum);
1702 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1703 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1704 ost->file_index, ost->index, p);
1708 /* compute min output value */
1709 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1710 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1711 ost->st->time_base, AV_TIME_BASE_Q));
1713 nb_frames_drop += ost->last_dropped;
1716 secs = FFABS(pts) / AV_TIME_BASE;
1717 us = FFABS(pts) % AV_TIME_BASE;
1723 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1724 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1726 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1728 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1729 "size=%8.0fkB time=", total_size / 1024.0);
1731 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1732 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1733 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1734 (100 * us) / AV_TIME_BASE);
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1738 av_bprintf(&buf_script, "bitrate=N/A\n");
1740 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1741 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1744 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1745 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1746 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1747 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1748 hours, mins, secs, us);
1750 if (nb_frames_dup || nb_frames_drop)
1751 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1752 nb_frames_dup, nb_frames_drop);
1753 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1754 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1757 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1758 av_bprintf(&buf_script, "speed=N/A\n");
1760 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1761 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' only at the end. */
1764 if (print_stats || is_last_report) {
1765 const char end = is_last_report ? '\n' : '\r';
1766 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1767 fprintf(stderr, "%s %c", buf, end);
1769 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1774 if (progress_avio) {
1775 av_bprintf(&buf_script, "progress=%s\n",
1776 is_last_report ? "end" : "continue");
1777 avio_write(progress_avio, buf_script.str,
1778 FFMIN(buf_script.len, buf_script.size - 1));
1779 avio_flush(progress_avio);
1780 av_bprint_finalize(&buf_script, NULL);
1781 if (is_last_report) {
1782 if ((ret = avio_closep(&progress_avio)) < 0)
1783 av_log(NULL, AV_LOG_ERROR,
1784 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1789 print_final_stats(total_size);
/* At end of input, drain every active encoder: send the NULL (flush) frame,
 * then loop avcodec_receive_packet() until EOF, muxing each delayed packet.
 * Streams that are stream-copied, raw-audio, or rawvideo-passthrough are
 * skipped since they buffer nothing.
 * NOTE(review): this excerpt has gaps (missing continues, loop headers and
 * braces per the embedded numbering) — confirm against upstream ffmpeg.c. */
1792 static void flush_encoders(void)
1796 for (i = 0; i < nb_output_streams; i++) {
1797 OutputStream *ost = output_streams[i];
1798 AVCodecContext *enc = ost->enc_ctx;
1799 OutputFile *of = output_files[ost->file_index];
1801 if (!ost->encoding_needed)
/* frame_size <= 1 means a raw/PCM-style audio encoder with no delay. */
1804 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1806 #if FF_API_LAVF_FMT_RAWPICTURE
1807 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1811 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* Entering drain mode: NULL frame tells the encoder no more input follows. */
1814 avcodec_send_frame(enc, NULL);
1817 const char *desc = NULL;
1821 switch (enc->codec_type) {
1822 case AVMEDIA_TYPE_AUDIO:
1825 case AVMEDIA_TYPE_VIDEO:
1832 av_init_packet(&pkt);
1836 update_benchmark(NULL);
1837 ret = avcodec_receive_packet(enc, &pkt);
1838 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1839 if (ret < 0 && ret != AVERROR_EOF) {
1840 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass encoding: append the encoder's pass-1 stats to the log file. */
1845 if (ost->logfile && enc->stats_out) {
1846 fprintf(ost->logfile, "%s", enc->stats_out);
1848 if (ret == AVERROR_EOF) {
1851 if (ost->finished & MUXER_FINISHED) {
1852 av_packet_unref(&pkt);
1855 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1856 pkt_size = pkt.size;
1857 output_packet(of, &pkt, ost);
1858 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1859 do_video_stats(ost, pkt_size);
1866 * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when the packet may be written: the output stream must be
 * fed by this exact input stream and the input pts must have reached the
 * output file's start time.
 * NOTE(review): lines are missing from this excerpt (the return statements
 * and at least one more condition) — confirm against upstream ffmpeg.c. */
1868 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1870 OutputFile *of = output_files[ost->file_index];
1871 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1873 if (ost->source_index != ist_index)
1879 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Stream-copy path (-c copy): rebuild an output packet from the demuxed input
 * packet without decoding — rescale pts/dts/duration into the output stream
 * time base, honor -ss/-t/-copypriorss boundaries, optionally run the parser
 * to repackage legacy codecs, and hand the packet to the muxer.
 * NOTE(review): this excerpt has gaps (missing returns, else branches and
 * braces per the embedded numbering) — confirm against upstream ffmpeg.c. */
1885 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1887 OutputFile *of = output_files[ost->file_index];
1888 InputFile *f = input_files [ist->file_index];
1889 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1890 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1894 av_init_packet(&opkt);
/* Drop leading non-keyframes unless -copyinkf was given. */
1896 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1897 !ost->copy_initial_nonkeyframes)
/* Drop packets before the requested start unless -copypriorss was given. */
1900 if (!ost->frame_number && !ost->copy_prior_start) {
1901 int64_t comp_start = start_time;
1902 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1903 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1904 if (pkt->pts == AV_NOPTS_VALUE ?
1905 ist->pts < comp_start :
1906 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Output-file recording time (-t on the output) reached: close the stream. */
1910 if (of->recording_time != INT64_MAX &&
1911 ist->pts >= of->recording_time + start_time) {
1912 close_output_stream(ost);
1916 if (f->recording_time != INT64_MAX) {
1917 start_time = f->ctx->start_time;
1918 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1919 start_time += f->start_time;
1920 if (ist->pts >= f->recording_time + start_time) {
1921 close_output_stream(ost);
1926 /* force the input stream PTS */
1927 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1930 if (pkt->pts != AV_NOPTS_VALUE)
1931 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1933 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: substitute the input stream's running dts estimate. */
1935 if (pkt->dts == AV_NOPTS_VALUE)
1936 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1938 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1939 opkt.dts -= ost_tb_start_time;
/* Audio: rescale with sample-accurate delta tracking to avoid drift. */
1941 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1942 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1944 duration = ist->dec_ctx->frame_size;
1945 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1946 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1947 ost->st->time_base) - ost_tb_start_time;
1950 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1951 opkt.flags = pkt->flags;
1952 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1953 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1954 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1955 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1956 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1958 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1959 &opkt.data, &opkt.size,
1960 pkt->data, pkt->size,
1961 pkt->flags & AV_PKT_FLAG_KEY);
1963 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser allocated a new buffer: wrap it so the packet owns (and frees) it. */
1968 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1973 opkt.data = pkt->data;
1974 opkt.size = pkt->size;
1976 av_copy_packet_side_data(&opkt, pkt);
1978 #if FF_API_LAVF_FMT_RAWPICTURE
1979 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1980 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1981 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1982 /* store AVPicture in AVPacket, as expected by the output format */
1983 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1985 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1989 opkt.data = (uint8_t *)&pict;
1990 opkt.size = sizeof(AVPicture);
1991 opkt.flags |= AV_PKT_FLAG_KEY;
1995 output_packet(of, &opkt, ost);
/* If the decoder reported no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn the user about the
 * guess. Returns nonzero on success per the visible control flow.
 * NOTE(review): the return statements are among the lines missing from this
 * excerpt — confirm against upstream ffmpeg.c. */
1998 int guess_input_channel_layout(InputStream *ist)
2000 AVCodecContext *dec = ist->dec_ctx;
2002 if (!dec->channel_layout) {
2003 char layout_name[256];
/* Refuse to guess above the user-configured channel-count ceiling. */
2005 if (dec->channels > ist->guess_layout_max)
2007 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2008 if (!dec->channel_layout)
2010 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2011 dec->channels, dec->channel_layout);
2012 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2013 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Update the global decode success/failure counters and, when -xerror is set,
 * abort on decode errors or on frames the decoder flagged as corrupt.
 * ist may be NULL (e.g. subtitles), in which case only counters are updated.
 * NOTE(review): this excerpt has gaps (the error-threshold exit path is
 * missing) — confirm against upstream ffmpeg.c. */
2018 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2020 if (*got_output || ret<0)
2021 decode_error_stat[ret<0] ++;
2023 if (ret < 0 && exit_on_error)
2026 if (exit_on_error && *got_output && ist) {
2027 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2028 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2034 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2035 // There is the following difference: if you got a frame, you must call
2036 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2037 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin adapter over the send/receive decode API: submit pkt (if any), then
 * try to receive one frame, setting *got_frame accordingly.
 * NOTE(review): lines are missing from this excerpt (the pkt NULL guard,
 * *got_frame updates and returns) — confirm against upstream ffmpeg.c. */
2038 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2045 ret = avcodec_send_packet(avctx, pkt);
2046 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2047 // decoded frames with avcodec_receive_frame() until done.
2048 if (ret < 0 && ret != AVERROR_EOF)
2052 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN here simply means "no frame available yet", not an error. */
2053 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Push one decoded frame into every filtergraph input fed by this stream.
 * All but the last filter receive a reference (via ist->filter_frame) so the
 * original frame can be handed to the final filter without an extra copy.
 * NOTE(review): this excerpt has gaps (the else branch selecting
 * decoded_frame for the last filter, the break/return paths) — confirm
 * against upstream ffmpeg.c. */
2061 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2066 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2067 for (i = 0; i < ist->nb_filters; i++) {
2068 if (i < ist->nb_filters - 1) {
2069 f = ist->filter_frame;
2070 ret = av_frame_ref(f, decoded_frame);
2075 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2076 AV_BUFFERSRC_FLAG_PUSH);
/* A filtergraph that already reached EOF is not an error for the caller. */
2077 if (ret == AVERROR_EOF)
2078 ret = 0; /* ignore */
2080 av_log(NULL, AV_LOG_ERROR,
2081 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/* Decode one audio packet: run the decoder, validate the sample rate, advance
 * the stream's pts/dts estimates by the decoded duration, detect sample
 * format/rate/channel-layout changes (reconfiguring the filtergraphs when
 * they occur), pick a pts source for the frame, and feed it to the filters.
 * NOTE(review): this excerpt has gaps (returns, else branches and braces per
 * the embedded numbering) — confirm against upstream ffmpeg.c. */
2088 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2090 AVFrame *decoded_frame;
2091 AVCodecContext *avctx = ist->dec_ctx;
2092 int i, ret, err = 0, resample_changed;
2093 AVRational decoded_frame_tb;
/* Lazily allocate the per-stream reusable frames. */
2095 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2096 return AVERROR(ENOMEM);
2097 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2098 return AVERROR(ENOMEM);
2099 decoded_frame = ist->decoded_frame;
2101 update_benchmark(NULL);
2102 ret = decode(avctx, decoded_frame, got_output, pkt);
2103 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2105 if (ret >= 0 && avctx->sample_rate <= 0) {
2106 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2107 ret = AVERROR_INVALIDDATA;
2110 if (ret != AVERROR_EOF)
2111 check_decode_result(ist, got_output, ret);
2113 if (!*got_output || ret < 0)
2116 ist->samples_decoded += decoded_frame->nb_samples;
2117 ist->frames_decoded++;
2120 /* increment next_dts to use for the case where the input stream does not
2121 have timestamps or there are multiple frames in the packet */
2122 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2124 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in audio parameters since the filters were configured. */
2128 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2129 ist->resample_channels != avctx->channels ||
2130 ist->resample_channel_layout != decoded_frame->channel_layout ||
2131 ist->resample_sample_rate != decoded_frame->sample_rate;
2132 if (resample_changed) {
2133 char layout1[64], layout2[64];
2135 if (!guess_input_channel_layout(ist)) {
2136 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2137 "layout for Input Stream #%d.%d\n", ist->file_index,
2141 decoded_frame->channel_layout = avctx->channel_layout;
2143 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2144 ist->resample_channel_layout);
2145 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2146 decoded_frame->channel_layout);
2148 av_log(NULL, AV_LOG_INFO,
2149 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2150 ist->file_index, ist->st->index,
2151 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2152 ist->resample_channels, layout1,
2153 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2154 avctx->channels, layout2);
/* Remember the new parameters, then rebuild the affected filtergraphs. */
2156 ist->resample_sample_fmt = decoded_frame->format;
2157 ist->resample_sample_rate = decoded_frame->sample_rate;
2158 ist->resample_channel_layout = decoded_frame->channel_layout;
2159 ist->resample_channels = avctx->channels;
2161 for (i = 0; i < ist->nb_filters; i++) {
2162 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2164 av_log(NULL, AV_LOG_ERROR,
2165 "Error reconfiguring input stream %d:%d filter %d\n",
2166 ist->file_index, ist->st->index, i);
2171 for (i = 0; i < nb_filtergraphs; i++)
2172 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2173 FilterGraph *fg = filtergraphs[i];
2174 if (configure_filtergraph(fg) < 0) {
2175 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* pts source priority: frame pts, then packet pts, then the dts estimate. */
2181 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2182 decoded_frame_tb = ist->st->time_base;
2183 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2184 decoded_frame->pts = pkt->pts;
2185 decoded_frame_tb = ist->st->time_base;
2187 decoded_frame->pts = ist->dts;
2188 decoded_frame_tb = AV_TIME_BASE_Q;
2190 if (decoded_frame->pts != AV_NOPTS_VALUE)
2191 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2192 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2193 (AVRational){1, avctx->sample_rate})
2194 ist->nb_samples = decoded_frame->nb_samples;
2195 err = send_frame_to_filters(ist, decoded_frame);
2196 decoded_frame->pts = AV_NOPTS_VALUE;
2199 av_frame_unref(ist->filter_frame);
2200 av_frame_unref(decoded_frame);
2201 return err < 0 ? err : ret;
/* Decode one video packet: remember the input dts (so flushed delayed frames
 * can still get a timestamp), run the decoder, fix up video_delay for H.264
 * without a parser, derive a best-effort timestamp, detect size/pix_fmt
 * changes (reconfiguring filtergraphs), and push the frame to the filters.
 * NOTE(review): this excerpt has gaps (returns, else branches and braces per
 * the embedded numbering) — confirm against upstream ffmpeg.c. */
2204 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2206 AVFrame *decoded_frame;
2207 int i, ret = 0, err = 0, resample_changed;
2208 int64_t best_effort_timestamp;
2209 int64_t dts = AV_NOPTS_VALUE;
2212 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2213 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2215 if (!eof && pkt && pkt->size == 0)
2218 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2219 return AVERROR(ENOMEM);
2220 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2221 return AVERROR(ENOM)
2222 decoded_frame = ist->decoded_frame;
2223 if (ist->dts != AV_NOPTS_VALUE)
2224 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2227 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2230 // The old code used to set dts on the drain packet, which does not work
2231 // with the new API anymore.
/* Queue this dts so a delayed frame emitted at EOF can fall back to it. */
2233 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2235 return AVERROR(ENOMEM);
2236 ist->dts_buffer = new;
2237 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2240 update_benchmark(NULL);
2241 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2242 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2244 // The following line may be required in some cases where there is no parser
2245 // or the parser does not has_b_frames correctly
2246 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2247 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2248 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2250 av_log(ist->dec_ctx, AV_LOG_WARNING,
2251 "video_delay is larger in decoder than demuxer %d > %d.\n"
2252 "If you want to help, upload a sample "
2253 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2254 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2255 ist->dec_ctx->has_b_frames,
2256 ist->st->codecpar->video_delay);
2259 if (ret != AVERROR_EOF)
2260 check_decode_result(ist, got_output, ret);
/* Diagnostic only: decoder context and emitted frame disagree on geometry. */
2262 if (*got_output && ret >= 0) {
2263 if (ist->dec_ctx->width != decoded_frame->width ||
2264 ist->dec_ctx->height != decoded_frame->height ||
2265 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2266 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2267 decoded_frame->width,
2268 decoded_frame->height,
2269 decoded_frame->format,
2270 ist->dec_ctx->width,
2271 ist->dec_ctx->height,
2272 ist->dec_ctx->pix_fmt);
2276 if (!*got_output || ret < 0)
2279 if(ist->top_field_first>=0)
2280 decoded_frame->top_field_first = ist->top_field_first;
2282 ist->frames_decoded++;
/* Download the frame from the hardware decoder into system memory. */
2284 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2285 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2289 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2291 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* At EOF a delayed frame may lack a timestamp; pop the oldest queued dts. */
2293 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2294 best_effort_timestamp = ist->dts_buffer[0];
2296 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2297 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2298 ist->nb_dts_buffer--;
2301 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2302 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2304 if (ts != AV_NOPTS_VALUE)
2305 ist->next_pts = ist->pts = ts;
2309 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2310 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2311 ist->st->index, av_ts2str(decoded_frame->pts),
2312 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2313 best_effort_timestamp,
2314 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2315 decoded_frame->key_frame, decoded_frame->pict_type,
2316 ist->st->time_base.num, ist->st->time_base.den);
2319 if (ist->st->sample_aspect_ratio.num)
2320 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Detect frame geometry/pixel-format changes since filters were configured. */
2322 resample_changed = ist->resample_width != decoded_frame->width ||
2323 ist->resample_height != decoded_frame->height ||
2324 ist->resample_pix_fmt != decoded_frame->format;
2325 if (resample_changed) {
2326 av_log(NULL, AV_LOG_INFO,
2327 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2328 ist->file_index, ist->st->index,
2329 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2330 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2332 ist->resample_width = decoded_frame->width;
2333 ist->resample_height = decoded_frame->height;
2334 ist->resample_pix_fmt = decoded_frame->format;
2336 for (i = 0; i < ist->nb_filters; i++) {
2337 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2339 av_log(NULL, AV_LOG_ERROR,
2340 "Error reconfiguring input stream %d:%d filter %d\n",
2341 ist->file_index, ist->st->index, i);
2346 for (i = 0; i < nb_filtergraphs; i++) {
2347 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2348 configure_filtergraph(filtergraphs[i]) < 0) {
2349 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2355 err = send_frame_to_filters(ist, decoded_frame);
2358 av_frame_unref(ist->filter_frame);
2359 av_frame_unref(decoded_frame);
2360 return err < 0 ? err : ret;
2363 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
/* Decode one subtitle packet from *ist and forward the result to every
 * subtitle output stream fed by this input; also keeps the sub2video
 * state up to date.  *got_output is set by the decoder; the libavcodec
 * return code is propagated (possibly swapped with the buffered
 * subtitle's code when -fix_sub_duration is active). */
2365 AVSubtitle subtitle;
2366 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2367 &subtitle, got_output, pkt);
/* Account the decode result (error/exit-on-error policy handling). */
2369 check_decode_result(NULL, got_output, ret);
2371 if (ret < 0 || !*got_output) {
/* Nothing was decoded: on EOF flush any pending sub2video picture. */
2373 sub2video_flush(ist);
/* -fix_sub_duration: buffer one subtitle so the previous one's display
 * duration can be clipped to the gap between consecutive pts values. */
2377 if (ist->fix_sub_duration) {
2379 if (ist->prev_sub.got_output) {
/* Gap between the previous and the current subtitle, in milliseconds
 * (end_display_time is expressed in ms). */
2380 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2381 1000, AV_TIME_BASE);
2382 if (end < ist->prev_sub.subtitle.end_display_time) {
2383 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2384 "Subtitle duration reduced from %d to %d%s\n",
2385 ist->prev_sub.subtitle.end_display_time, end,
2386 end <= 0 ? ", dropping it" : "");
2387 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and buffered subtitle: the previous one is emitted now,
 * the current one is held until the next call. */
2390 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2391 FFSWAP(int, ret, ist->prev_sub.ret);
2392 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* Render the subtitle into the sub2video filter input, if configured. */
2400 sub2video_update(ist, &subtitle);
2402 if (!subtitle.num_rects)
2405 ist->frames_decoded++;
/* Hand the decoded subtitle to every matching subtitle encoder. */
2407 for (i = 0; i < nb_output_streams; i++) {
2408 OutputStream *ost = output_streams[i];
2410 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2411 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2414 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* The AVSubtitle owns heap-allocated rects; always release them. */
2418 avsubtitle_free(&subtitle);
2422 static int send_filter_eof(InputStream *ist)
/* Mark every filtergraph input fed by this input stream as finished by
 * pushing a NULL (EOF) frame into each buffer source.  The caller
 * treats a negative return as fatal (see process_input_packet). */
2425 for (i = 0; i < ist->nb_filters; i++) {
2426 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2433 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2434 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* Per-packet dispatcher for one input stream: maintains the stream's
 * dts/pts clocks (in AV_TIME_BASE units), runs the appropriate decoder
 * when transcoding (looping on EOF to drain buffered frames), signals
 * EOF to the attached filters, and advances timing + forwards packets
 * directly in streamcopy mode.  Returns 0 once the stream reached EOF,
 * nonzero while more input is expected.  no_eof suppresses the filter
 * EOF signal (used when the input file is being looped). */
2438 int eof_reached = 0;
/* First packet seen for this stream: seed the clock.  Start negative by
 * the decoder delay (B-frames) so output timestamps land near zero. */
2441 if (!ist->saw_first_ts) {
2442 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2444 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2445 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2446 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2448 ist->saw_first_ts = 1;
2451 if (ist->next_dts == AV_NOPTS_VALUE)
2452 ist->next_dts = ist->dts;
2453 if (ist->next_pts == AV_NOPTS_VALUE)
2454 ist->next_pts = ist->pts;
2458 av_init_packet(&avpkt);
/* Resynchronize the stream clock from the packet's dts. */
2465 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2466 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2467 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2468 ist->next_pts = ist->pts = ist->dts;
2471 // while we have more to decode or while the decoder did output something on EOF
2472 while (ist->decoding_needed) {
2476 ist->pts = ist->next_pts;
2477 ist->dts = ist->next_dts;
2479 switch (ist->dec_ctx->codec_type) {
2480 case AVMEDIA_TYPE_AUDIO:
/* On iterations after the first ("repeating") the decoder is only
 * drained, so no packet is passed in. */
2481 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2483 case AVMEDIA_TYPE_VIDEO:
2484 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2485 if (!repeating || !pkt || got_output) {
2486 if (pkt && pkt->duration) {
2487 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2488 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* No container duration: derive it from the codec frame rate, taking
 * parser-reported repeated fields into account. */
2489 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2490 duration = ((int64_t)AV_TIME_BASE *
2491 ist->dec_ctx->framerate.den * ticks) /
2492 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2495 if(ist->dts != AV_NOPTS_VALUE && duration) {
2496 ist->next_dts += duration;
2498 ist->next_dts = AV_NOPTS_VALUE;
2502 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2504 case AVMEDIA_TYPE_SUBTITLE:
2507 ret = transcode_subtitles(ist, &avpkt, &got_output);
2508 if (!pkt && ret >= 0)
2515 if (ret == AVERROR_EOF) {
2521 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2522 ist->file_index, ist->st->index, av_err2str(ret));
2525 // Decoding might not terminate if we're draining the decoder, and
2526 // the decoder keeps returning an error.
2527 // This should probably be considered a libavcodec issue.
2528 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2537 // During draining, we might get multiple output frames in this loop.
2538 // ffmpeg.c does not drain the filter chain on configuration changes,
2539 // which means if we send multiple frames at once to the filters, and
2540 // one of those frames changes configuration, the buffered frames will
2541 // be lost. This can upset certain FATE tests.
2542 // Decode only 1 frame per call on EOF to appease these FATE tests.
2543 // The ideal solution would be to rewrite decoding to use the new
2544 // decoding API in a better way.
2551 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2552 /* except when looping we need to flush but not to send an EOF */
2553 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2554 int ret = send_filter_eof(ist);
2556 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2561 /* handle stream copy */
2562 if (!ist->decoding_needed) {
/* No frames are decoded in copy mode: advance the clock purely from
 * container / codec metadata. */
2563 ist->dts = ist->next_dts;
2564 switch (ist->dec_ctx->codec_type) {
2565 case AVMEDIA_TYPE_AUDIO:
2566 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2567 ist->dec_ctx->sample_rate;
2569 case AVMEDIA_TYPE_VIDEO:
2570 if (ist->framerate.num) {
2571 // TODO: Remove work-around for c99-to-c89 issue 7
2572 AVRational time_base_q = AV_TIME_BASE_Q;
2573 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2574 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2575 } else if (pkt->duration) {
2576 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2577 } else if(ist->dec_ctx->framerate.num != 0) {
2578 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2579 ist->next_dts += ((int64_t)AV_TIME_BASE *
2580 ist->dec_ctx->framerate.den * ticks) /
2581 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2585 ist->pts = ist->dts;
2586 ist->next_pts = ist->next_dts;
/* Forward the packet untouched to every matching streamcopy output. */
2588 for (i = 0; pkt && i < nb_output_streams; i++) {
2589 OutputStream *ost = output_streams[i];
2591 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2594 do_streamcopy(ist, ost, pkt);
2597 return !eof_reached;
2600 static void print_sdp(void)
/* Build and emit an SDP description covering every RTP output file.
 * Bails out until all output headers have been written; prints to
 * stdout unless -sdp_file was given, in which case the SDP is written
 * to that file instead. */
2605 AVIOContext *sdp_pb;
2606 AVFormatContext **avc;
2608 for (i = 0; i < nb_output_files; i++) {
/* Cannot create the SDP before every muxer is initialized. */
2609 if (!output_files[i]->header_written)
2613 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts how many were found. */
2616 for (i = 0, j = 0; i < nb_output_files; i++) {
2617 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2618 avc[j] = output_files[i]->ctx;
2626 av_sdp_create(avc, j, sdp, sizeof(sdp));
2628 if (!sdp_filename) {
2629 printf("SDP:\n%s\n", sdp);
2632 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2633 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2635 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2636 avio_closep(&sdp_pb);
/* Free and clear the filename so the SDP file is written only once. */
2637 av_freep(&sdp_filename);
2645 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2648 for (i = 0; hwaccels[i].name; i++)
2649 if (hwaccels[i].pix_fmt == pix_fmt)
2650 return &hwaccels[i];
2654 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: walk the decoder's candidate
 * pixel-format list and select the first hwaccel format that can be
 * initialized for this input stream; s->opaque carries the ist. */
2656 InputStream *ist = s->opaque;
2657 const enum AVPixelFormat *p;
/* The list is terminated by -1 (AV_PIX_FMT_NONE). */
2660 for (p = pix_fmts; *p != -1; p++) {
2661 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2662 const HWAccel *hwaccel;
/* Only formats flagged as hardware-accelerated are considered further. */
2664 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2667 hwaccel = get_hwaccel(*p);
/* Reject this candidate when another hwaccel is already active, or the
 * user explicitly requested a different one. */
2669 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2670 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2673 ret = hwaccel->init(s);
2675 if (ist->hwaccel_id == hwaccel->id) {
/* The hwaccel was requested explicitly, so failure to set it up is
 * fatal rather than a silent fallback. */
2676 av_log(NULL, AV_LOG_FATAL,
2677 "%s hwaccel requested for input stream #%d:%d, "
2678 "but cannot be initialized.\n", hwaccel->name,
2679 ist->file_index, ist->st->index);
2680 return AV_PIX_FMT_NONE;
2685 if (ist->hw_frames_ctx) {
/* Give the decoder a reference to the hardware frames pool that the
 * hwaccel init() prepared on the ist. */
2686 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2687 if (!s->hw_frames_ctx)
2688 return AV_PIX_FMT_NONE;
/* Record the chosen hwaccel and its pixel format on the stream. */
2691 ist->active_hwaccel_id = hwaccel->id;
2692 ist->hwaccel_pix_fmt = *p;
2699 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2701 InputStream *ist = s->opaque;
2703 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2704 return ist->hwaccel_get_buffer(s, frame, flags);
2706 return avcodec_default_get_buffer2(s, frame, flags);
2709 static int init_input_stream(int ist_index, char *error, int error_len)
/* Prepare input stream ist_index for use: propagate decoder parameters
 * to its filtergraph inputs and, when the stream must be decoded, set
 * up the codec context callbacks and open the decoder.  On failure a
 * human-readable message is written into error[] and a negative
 * AVERROR code is returned. */
2712 InputStream *ist = input_streams[ist_index];
2714 for (i = 0; i < ist->nb_filters; i++) {
2715 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2717 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2722 if (ist->decoding_needed) {
2723 AVCodec *codec = ist->dec;
/* No decoder implementation exists for this stream's codec id. */
2725 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2726 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2727 return AVERROR(EINVAL);
/* Install ffmpeg's hwaccel-aware callbacks; opaque carries the ist so
 * get_format()/get_buffer() can find the stream state. */
2730 ist->dec_ctx->opaque = ist;
2731 ist->dec_ctx->get_format = get_format;
2732 ist->dec_ctx->get_buffer2 = get_buffer;
2733 ist->dec_ctx->thread_safe_callbacks = 1;
2735 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2736 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2737 (ist->decoding_needed & DECODING_FOR_OST)) {
2738 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2739 if (ist->decoding_needed & DECODING_FOR_FILTER)
2740 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2743 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2745 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2746 * audio, and video decoders such as cuvid or mediacodec */
2747 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread count unless the user chose one. */
2749 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2750 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2751 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2752 if (ret == AVERROR_EXPERIMENTAL)
2753 abort_codec_experimental(codec, 0);
2755 snprintf(error, error_len,
2756 "Error while opening decoder for input stream "
2758 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed -> abort. */
2761 assert_avoptions(ist->decoder_opts);
/* Timestamps are unknown until the first packet is processed. */
2764 ist->next_pts = AV_NOPTS_VALUE;
2765 ist->next_dts = AV_NOPTS_VALUE;
2770 static InputStream *get_input_stream(OutputStream *ost)
2772 if (ost->source_index >= 0)
2773 return input_streams[ost->source_index];
static int compare_int64(const void *a, const void *b)
{
    /* qsort() comparator for int64_t values.  Returns negative, zero or
     * positive according to the ordering of *a and *b; written as a
     * sign comparison rather than subtraction to avoid overflow. */
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2782 /* open the muxer when all the streams are initialized */
2783 static int check_init_output_file(OutputFile *of, int file_index)
/* If every stream of this output file is initialized, write the muxer
 * header, optionally emit the SDP, and flush packets queued while the
 * muxer was not yet open.  Returns 0 (also when still waiting on
 * streams) or a negative AVERROR on header failure. */
2787 for (i = 0; i < of->ctx->nb_streams; i++) {
2788 OutputStream *ost = output_streams[of->ost_index + i];
/* At least one stream is not ready yet: try again later. */
2789 if (!ost->initialized)
2793 of->ctx->interrupt_callback = int_cb;
2795 ret = avformat_write_header(of->ctx, &of->opts);
2797 av_log(NULL, AV_LOG_ERROR,
2798 "Could not write header for output file #%d "
2799 "(incorrect codec parameters ?): %s\n",
2800 file_index, av_err2str(ret));
2803 //assert_avoptions(of->opts);
2804 of->header_written = 1;
2806 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2808 if (sdp_filename || want_sdp)
2811 /* flush the muxing queues */
2812 for (i = 0; i < of->ctx->nb_streams; i++) {
2813 OutputStream *ost = output_streams[of->ost_index + i];
/* Drain packets buffered before the muxer could accept them. */
2815 while (av_fifo_size(ost->muxing_queue)) {
2817 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2818 write_packet(of, &pkt, ost);
2825 static int init_output_bsfs(OutputStream *ost)
/* Chain-initialize this output stream's bitstream filters: each filter
 * receives the codec parameters and time base produced by the previous
 * one (the stream's own for the first), and the last filter's output
 * parameters are copied back onto the stream. */
2830 if (!ost->nb_bitstream_filters)
2833 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2834 ctx = ost->bsf_ctx[i];
/* Input of filter i is the output of filter i-1 (or the stream's own
 * codec parameters for the first filter in the chain). */
2836 ret = avcodec_parameters_copy(ctx->par_in,
2837 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2841 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2843 ret = av_bsf_init(ctx);
2845 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2846 ost->bsf_ctx[i]->filter->name);
/* The stream now carries whatever the last filter outputs. */
2851 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2852 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2856 ost->st->time_base = ctx->time_base_out;
2861 static int init_output_stream_streamcopy(OutputStream *ost)
/* Initialize an output stream in streamcopy (-c copy) mode: transfer
 * the input stream's codec parameters, codec tag, timing, disposition
 * and side data onto the output stream without opening an encoder.
 * Returns 0 or a negative AVERROR code. */
2863 OutputFile *of = output_files[ost->file_index];
2864 InputStream *ist = get_input_stream(ost);
2865 AVCodecParameters *par_dst = ost->st->codecpar;
2866 AVCodecParameters *par_src = ost->ref_par;
2869 uint32_t codec_tag = par_dst->codec_tag;
/* Streamcopy always has a direct source stream and no filtergraph. */
2871 av_assert0(ist && !ost->filter);
/* Route the input parameters through enc_ctx so user-supplied codec
 * options (encoder_opts) can still be applied to them. */
2873 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2875 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2877 av_log(NULL, AV_LOG_FATAL,
2878 "Error setting up codec context options.\n");
2881 avcodec_parameters_from_context(par_src, ost->enc_ctx);
2884 unsigned int codec_tag_tmp;
/* Keep the source codec_tag only when the target container has no tag
 * table, maps this tag to the same codec id, or has no tag of its own
 * for this codec; otherwise the muxer default is used. */
2885 if (!of->ctx->oformat->codec_tag ||
2886 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2887 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2888 codec_tag = par_src->codec_tag;
2891 ret = avcodec_parameters_copy(par_dst, par_src);
2895 par_dst->codec_tag = codec_tag;
2897 if (!ost->frame_rate.num)
2898 ost->frame_rate = ist->framerate;
2899 ost->st->avg_frame_rate = ost->frame_rate;
2901 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2905 // copy timebase while removing common factors
2906 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2909 ost->st->disposition = ist->st->disposition;
/* Deep-copy stream-level side data, skipping the display matrix when
 * the rotation was overridden on the command line. */
2911 if (ist->st->nb_side_data) {
2912 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2913 sizeof(*ist->st->side_data));
2914 if (!ost->st->side_data)
2915 return AVERROR(ENOMEM);
2917 ost->st->nb_side_data = 0;
2918 for (i = 0; i < ist->st->nb_side_data; i++) {
2919 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2920 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2922 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2925 sd_dst->data = av_malloc(sd_src->size);
2927 return AVERROR(ENOMEM);
2928 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2929 sd_dst->size = sd_src->size;
2930 sd_dst->type = sd_src->type;
2931 ost->st->nb_side_data++;
/* A parser is kept around for the copied stream (see the FIXME in
 * init_output_stream about the parser codec context). */
2935 ost->parser = av_parser_init(par_dst->codec_id);
2936 ost->parser_avctx = avcodec_alloc_context3(NULL);
2937 if (!ost->parser_avctx)
2938 return AVERROR(ENOMEM);
2940 switch (par_dst->codec_type) {
2941 case AVMEDIA_TYPE_AUDIO:
/* -vol requires decoding; it cannot be combined with -acodec copy. */
2942 if (audio_volume != 256) {
2943 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Clear block_align for MP3 (known bogus values) and AC3 — presumably
 * unreliable for remuxing; see project history for rationale. */
2946 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2947 par_dst->block_align= 0;
2948 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2949 par_dst->block_align= 0;
2951 case AVMEDIA_TYPE_VIDEO:
/* Sample aspect ratio: -aspect option wins, then the input stream's
 * value, then the codec-level value from the source parameters. */
2952 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2954 av_mul_q(ost->frame_aspect_ratio,
2955 (AVRational){ par_dst->height, par_dst->width });
2956 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2957 "with stream copy may produce invalid files\n");
2959 else if (ist->st->sample_aspect_ratio.num)
2960 sar = ist->st->sample_aspect_ratio;
2962 sar = par_src->sample_aspect_ratio;
2963 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2964 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2965 ost->st->r_frame_rate = ist->st->r_frame_rate;
2972 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Tag the output stream with an "encoder" metadata entry naming the
 * libavcodec encoder in use, unless the user already set one.  When
 * bitexact output was requested (format or codec level) the library
 * version string is replaced by a bare "Lavc" prefix. */
2974 AVDictionaryEntry *e;
2976 uint8_t *encoder_string;
2977 int encoder_string_len;
2978 int format_flags = 0;
2979 int codec_flags = 0;
/* Respect a user-provided encoder tag. */
2981 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags to detect AVFMT_FLAG_BITEXACT on the muxer. */
2984 e = av_dict_get(of->opts, "fflags", NULL, 0);
2986 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2989 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Evaluate -flags to detect AV_CODEC_FLAG_BITEXACT on the encoder. */
2991 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2993 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2996 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* sizeof(LIBAVCODEC_IDENT) includes the NUL; +2 covers " " and NUL. */
2999 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3000 encoder_string = av_mallocz(encoder_string_len);
3001 if (!encoder_string)
3004 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3005 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3007 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3008 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* The dict takes ownership of encoder_string (DONT_STRDUP_VAL). */
3009 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3010 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3013 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3014 AVCodecContext *avctx)
/* Parse the -force_key_frames time list (comma-separated; the token
 * "chapters[+offset]" expands to all chapter start times of the output
 * file) into a sorted array of pts values in the encoder time base,
 * stored in ost->forced_kf_pts / ost->forced_kf_count. */
3017 int n = 1, i, size, index = 0;
/* Count commas to size the initial timestamp array. */
3020 for (p = kf; *p; p++)
3024 pts = av_malloc_array(size, sizeof(*pts));
3026 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3031 for (i = 0; i < n; i++) {
3032 char *next = strchr(p, ',');
3037 if (!memcmp(p, "chapters", 8)) {
3039 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array so every chapter gets a slot; the first check guards
 * against integer overflow of size. */
3042 if (avf->nb_chapters > INT_MAX - size ||
3043 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3045 av_log(NULL, AV_LOG_FATAL,
3046 "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters", applied to every chapter start. */
3049 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3050 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3052 for (j = 0; j < avf->nb_chapters; j++) {
3053 AVChapter *c = avf->chapters[j];
3054 av_assert1(index < size);
3055 pts[index++] = av_rescale_q(c->start, c->time_base,
3056 avctx->time_base) + t;
/* Plain timestamp token: parse and rescale into the encoder timebase. */
3061 t = parse_time_or_die("force_key_frames", p, 1);
3062 av_assert1(index < size);
3063 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* The array is sorted so do_video_out can scan it monotonically. */
3070 av_assert0(index == size);
3071 qsort(pts, size, sizeof(*pts), compare_int64);
3072 ost->forced_kf_count = size;
3073 ost->forced_kf_pts = pts;
3076 static int init_output_stream_encode(OutputStream *ost)
/* Configure the encoder context of an output stream that will be
 * re-encoded: choose disposition and frame rate, configure the simple
 * filtergraph, then fill in per-media-type encoder parameters (sample
 * format/rate, dimensions, pixel format, time base, forced keyframes)
 * from the filter sink and the corresponding input stream. */
3078 InputStream *ist = get_input_stream(ost);
3079 AVCodecContext *enc_ctx = ost->enc_ctx;
3080 AVCodecContext *dec_ctx = NULL;
3081 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3084 set_encoder_id(output_files[ost->file_index], ost);
3087 ost->st->disposition = ist->st->disposition;
3089 dec_ctx = ist->dec_ctx;
3091 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only audio/video stream of its type in the output,
 * mark it with the default disposition. */
3093 for (j = 0; j < oc->nb_streams; j++) {
3094 AVStream *st = oc->streams[j];
3095 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3098 if (j == oc->nb_streams)
3099 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3100 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3101 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* Simple (one input, one output) filtergraphs are configured here,
 * right before the encoder opens, so the sink parameters read below
 * are valid. */
3104 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3105 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3106 filtergraph_is_simple(ost->filter->graph)) {
3107 FilterGraph *fg = ost->filter->graph;
3109 if (configure_filtergraph(fg)) {
3110 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate priority: -r option, filter sink rate, input
 * -r/framerate, input r_frame_rate, and finally a 25 fps fallback. */
3115 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3116 if (!ost->frame_rate.num)
3117 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3118 if (ist && !ost->frame_rate.num)
3119 ost->frame_rate = ist->framerate;
3120 if (ist && !ost->frame_rate.num)
3121 ost->frame_rate = ist->st->r_frame_rate;
3122 if (ist && !ost->frame_rate.num) {
3123 ost->frame_rate = (AVRational){25, 1};
3124 av_log(NULL, AV_LOG_WARNING,
3126 "about the input framerate is available. Falling "
3127 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3128 "if you want a different framerate.\n",
3129 ost->file_index, ost->index);
3131 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
/* Snap to the nearest rate the encoder supports, unless -force_fps. */
3132 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3133 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3134 ost->frame_rate = ost->enc->supported_framerates[idx];
3136 // reduce frame rate for mpeg4 to be within the spec limits
3137 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3138 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3139 ost->frame_rate.num, ost->frame_rate.den, 65535);
3143 switch (enc_ctx->codec_type) {
3144 case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come straight from the buffersink feeding this
 * encoder. */
3145 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3147 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3148 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3149 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3150 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3151 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3152 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3154 case AVMEDIA_TYPE_VIDEO:
/* Video time base defaults to 1/frame_rate, else the filter link's. */
3155 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3156 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3157 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3158 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3159 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3160 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3161 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Convert user-supplied forced keyframe times into the encoder
 * time base now that it is known. */
3163 for (j = 0; j < ost->forced_kf_count; j++)
3164 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3166 enc_ctx->time_base);
3168 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3169 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3170 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3171 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3172 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3173 ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn when H.264/MPEG-2 will use a pixel format that outdated players
 * may not handle, since none was specified explicitly. */
3174 if (!strncmp(ost->enc->name, "libx264", 7) &&
3175 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3176 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3177 av_log(NULL, AV_LOG_WARNING,
3178 "No pixel format specified, %s for H.264 encoding chosen.\n"
3179 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3180 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3181 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3182 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3183 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3184 av_log(NULL, AV_LOG_WARNING,
3185 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3186 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3187 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3188 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3190 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3191 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3193 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep the user-requested raw sample depth when geometry or pixel
 * format differs from the decoded input. */
3196 enc_ctx->width != dec_ctx->width ||
3197 enc_ctx->height != dec_ctx->height ||
3198 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3199 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (keep input keyframes, handled elsewhere), or a static
 * comma-separated list of timestamps parsed below. */
3202 if (ost->forced_keyframes) {
3203 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3204 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3205 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3207 av_log(NULL, AV_LOG_ERROR,
3208 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3211 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3212 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3213 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3214 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3216 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3217 // parse it only for static kf timings
3218 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3219 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3223 case AVMEDIA_TYPE_SUBTITLE:
/* Subtitles use a fixed millisecond time base; inherit the video
 * canvas size from the source stream when none is set. */
3224 enc_ctx->time_base = (AVRational){1, 1000};
3225 if (!enc_ctx->width) {
3226 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3227 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3230 case AVMEDIA_TYPE_DATA:
3240 static int init_output_stream(OutputStream *ost, char *error, int error_len)
/* Fully initialize one output stream: set up and open the encoder (or
 * copy parameters in streamcopy mode), apply user disposition flags,
 * initialize bitstream filters, and finally attempt to open the owning
 * output file's muxer once all of its streams are ready.  On encoder
 * failure a message is written into error[]. */
3244 if (ost->encoding_needed) {
3245 AVCodec *codec = ost->enc;
3246 AVCodecContext *dec = NULL;
3249 ret = init_output_stream_encode(ost);
3253 if ((ist = get_input_stream(ost)))
/* Propagate the decoder's ASS subtitle header so the encoder can
 * reuse the styles of the source. */
3255 if (dec && dec->subtitle_header) {
3256 /* ASS code assumes this buffer is null terminated so add extra byte. */
3257 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3258 if (!ost->enc_ctx->subtitle_header)
3259 return AVERROR(ENOMEM);
3260 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3261 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3263 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3264 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s unless b/ab was set explicitly. */
3265 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3267 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3268 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3269 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Pass the hardware frames context coming out of the filtergraph on
 * to the encoder. */
3271 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3272 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3273 if (!ost->enc_ctx->hw_frames_ctx)
3274 return AVERROR(ENOMEM);
3277 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3278 if (ret == AVERROR_EXPERIMENTAL)
3279 abort_codec_experimental(codec, 1);
3280 snprintf(error, error_len,
3281 "Error while opening encoder for output stream #%d:%d - "
3282 "maybe incorrect parameters such as bit_rate, rate, width or height",
3283 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders dictate the buffersink frame size. */
3286 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3287 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3288 av_buffersink_set_frame_size(ost->filter->filter,
3289 ost->enc_ctx->frame_size);
/* Any encoder option left in the dict was not consumed -> abort. */
3290 assert_avoptions(ost->encoder_opts);
3291 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3292 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3293 " It takes bits/s as argument, not kbits/s\n");
3295 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3297 av_log(NULL, AV_LOG_FATAL,
3298 "Error initializing the output stream codec context.\n");
3302 * FIXME: ost->st->codec should't be needed here anymore.
3304 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Mirror the encoder's coded side data (e.g. CPB properties) onto the
 * output stream so the muxer can see it. */
3308 if (ost->enc_ctx->nb_coded_side_data) {
3311 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3312 sizeof(*ost->st->side_data));
3313 if (!ost->st->side_data)
3314 return AVERROR(ENOMEM);
3316 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3317 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3318 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3320 sd_dst->data = av_malloc(sd_src->size);
3322 return AVERROR(ENOMEM);
3323 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3324 sd_dst->size = sd_src->size;
3325 sd_dst->type = sd_src->type;
3326 ost->st->nb_side_data++;
3330 // copy timebase while removing common factors
3331 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3332 ost->st->codec->codec= ost->enc_ctx->codec;
3333 } else if (ost->stream_copy) {
3334 ret = init_output_stream_streamcopy(ost);
3339 * FIXME: will the codec context used by the parser during streamcopy
3340 * This should go away with the new parser API.
3342 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3347 // parse user provided disposition, and update stream values
3348 if (ost->disposition) {
/* Local AVOption table so the -disposition flag string can be parsed
 * with av_opt_eval_flags into AV_DISPOSITION_* bits. */
3349 static const AVOption opts[] = {
3350 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3351 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3352 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3353 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3354 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3355 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3356 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3357 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3358 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3359 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3360 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3361 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3362 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3363 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3366 static const AVClass class = {
3368 .item_name = av_default_item_name,
3370 .version = LIBAVUTIL_VERSION_INT,
3372 const AVClass *pclass = &class;
3374 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3379 /* initialize bitstream filters for the output stream
3380 * needs to be done here, because the codec id for streamcopy is not
3381 * known until now */
3382 ret = init_output_bsfs(ost);
3386 ost->initialized = 1;
/* The muxer header may be writable now that this stream is ready. */
3388 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3395 static void report_new_stream(int input_index, AVPacket *pkt)
/* Warn (once per stream) when a packet arrives for an input stream
 * that appeared after the initial probe; nb_streams_warn remembers the
 * highest stream index already reported. */
3397 InputFile *file = input_files[input_index];
3398 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream (or a later one). */
3400 if (pkt->stream_index < file->nb_streams_warn)
3402 av_log(file->ctx, AV_LOG_WARNING,
3403 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3404 av_get_media_type_string(st->codecpar->codec_type),
3405 input_index, pkt->stream_index,
3406 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3407 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode main loop: bind complex-filtergraph
 * outputs to source streams, init input streams and encoders/bsfs, discard
 * unused programs, write headers for stream-less outputs, and dump the
 * stream mapping at AV_LOG_INFO. Returns 0 on success, <0 on error. */
3410 static int transcode_init(void)
3412     int ret = 0, i, j, k;
3413     AVFormatContext *oc;
3416     char error[1024] = {0};
     /* For outputs fed by a single-input complex filtergraph, record the
      * feeding input stream as the output's source_index (used for mapping
      * display below). */
3418     for (i = 0; i < nb_filtergraphs; i++) {
3419         FilterGraph *fg = filtergraphs[i];
3420         for (j = 0; j < fg->nb_outputs; j++) {
3421             OutputFilter *ofilter = fg->outputs[j];
3422             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3424             if (fg->nb_inputs != 1)
3426             for (k = nb_input_streams-1; k >= 0 ; k--)
3427                 if (fg->inputs[0]->ist == input_streams[k])
3429             ofilter->ost->source_index = k;
3433     /* init framerate emulation */
3434     for (i = 0; i < nb_input_files; i++) {
3435         InputFile *ifile = input_files[i];
3436         if (ifile->rate_emu)
3437             for (j = 0; j < ifile->nb_streams; j++)
                 /* record per-stream wall-clock start for -re pacing */
3438                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3441     /* hwaccel transcoding */
3442     for (i = 0; i < nb_output_streams; i++) {
3443         ost = output_streams[i];
         /* hardware transcode paths apply only when actually encoding */
3445         if (!ost->stream_copy) {
3447             if (qsv_transcode_init(ost))
3452             if (cuvid_transcode_init(ost))
3458     /* init input streams */
3459     for (i = 0; i < nb_input_streams; i++)
3460         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
             /* on failure, close any encoder contexts before bailing out */
3461             for (i = 0; i < nb_output_streams; i++) {
3462                 ost = output_streams[i];
3463                 avcodec_close(ost->enc_ctx);
3468     /* open each encoder */
3469     for (i = 0; i < nb_output_streams; i++) {
3470         ret = init_output_stream(output_streams[i], error, sizeof(error));
3475     /* discard unused programs */
3476     for (i = 0; i < nb_input_files; i++) {
3477         InputFile *ifile = input_files[i];
3478         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3479             AVProgram *p = ifile->ctx->programs[j];
3480             int discard  = AVDISCARD_ALL;
             /* keep the program if at least one of its streams is used */
3482             for (k = 0; k < p->nb_stream_indexes; k++)
3483                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3484                     discard = AVDISCARD_DEFAULT;
3487             p->discard = discard;
3491     /* write headers for files with no streams */
3492     for (i = 0; i < nb_output_files; i++) {
3493         oc = output_files[i]->ctx;
3494         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3495             ret = check_init_output_file(output_files[i], i);
3502     /* dump the stream mapping */
3503     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3504     for (i = 0; i < nb_input_streams; i++) {
3505         ist = input_streams[i];
3507         for (j = 0; j < ist->nb_filters; j++) {
             /* only complex graphs are listed on the input side */
3508             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3509                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3510                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3511                        ist->filters[j]->name);
3512                 if (nb_filtergraphs > 1)
3513                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3514                 av_log(NULL, AV_LOG_INFO, "\n");
3519     for (i = 0; i < nb_output_streams; i++) {
3520         ost = output_streams[i];
3522         if (ost->attachment_filename) {
3523             /* an attached file */
3524             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3525                    ost->attachment_filename, ost->file_index, ost->index);
3529         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3530             /* output from a complex graph */
3531             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3532             if (nb_filtergraphs > 1)
3533                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3535             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3536                    ost->index, ost->enc ? ost->enc->name : "?");
         /* ordinary input->output mapping line */
3540         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3541                input_streams[ost->source_index]->file_index,
3542                input_streams[ost->source_index]->st->index,
3545         if (ost->sync_ist != input_streams[ost->source_index])
3546             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3547                    ost->sync_ist->file_index,
3548                    ost->sync_ist->st->index);
3549         if (ost->stream_copy)
3550             av_log(NULL, AV_LOG_INFO, " (copy)");
3552             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3553             const AVCodec *out_codec   = ost->enc;
3554             const char *decoder_name   = "?";
3555             const char *in_codec_name  = "?";
3556             const char *encoder_name   = "?";
3557             const char *out_codec_name = "?";
3558             const AVCodecDescriptor *desc;
3561                 decoder_name  = in_codec->name;
3562                 desc = avcodec_descriptor_get(in_codec->id);
3564                     in_codec_name = desc->name;
                 /* display "native" when the decoder is the default one
                  * for the codec (same name as the codec descriptor) */
3565                 if (!strcmp(decoder_name, in_codec_name))
3566                     decoder_name = "native";
3570                 encoder_name   = out_codec->name;
3571                 desc = avcodec_descriptor_get(out_codec->id);
3573                     out_codec_name = desc->name;
3574                 if (!strcmp(encoder_name, out_codec_name))
3575                     encoder_name = "native";
3578             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3579                    in_codec_name, decoder_name,
3580                    out_codec_name, encoder_name);
3582         av_log(NULL, AV_LOG_INFO, "\n");
3586         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3590     transcode_init_done = 1;
3595 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3596 static int need_output(void)
3600     for (i = 0; i < nb_output_streams; i++) {
3601         OutputStream *ost    = output_streams[i];
3602         OutputFile *of       = output_files[ost->file_index];
3603         AVFormatContext *os  = output_files[ost->file_index]->ctx;
         /* a stream is done if flagged finished or its file hit -fs limit */
3605         if (ost->finished ||
3606             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
         /* reaching -frames closes every stream of the owning file */
3608         if (ost->frame_number >= ost->max_frames) {
3610             for (j = 0; j < of->ctx->nb_streams; j++)
3611                 close_output_stream(output_streams[of->ost_index + j]);
3622  * Select the output stream to process.
3624  * @return selected output stream, or NULL if none available
3626 static OutputStream *choose_output(void)
3629     int64_t opts_min = INT64_MAX;
3630     OutputStream *ost_min = NULL;
     /* pick the unfinished stream with the smallest rescaled cur_dts;
      * streams without a valid dts sort first (INT64_MIN) */
3632     for (i = 0; i < nb_output_streams; i++) {
3633         OutputStream *ost = output_streams[i];
3634         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3635                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3637         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3638             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3640         if (!ost->finished && opts < opts_min) {
             /* an unavailable winner yields NULL so the caller can wait */
3642             ost_min  = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * silently does nothing if stdin is not a tty (tcgetattr fails). */
3648 static void set_tty_echo(int on)
3652     if (tcgetattr(0, &tty) == 0) {
3653         if (on) tty.c_lflag |= ECHO;
3654         else    tty.c_lflag &= ~ECHO;
3655         tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms, never in daemon mode) and react
 * to interactive keys: q quits, +/- change log level, c/C send filter
 * commands, d/D adjust debug flags, ? prints help.
 * Returns AVERROR_EXIT to request shutdown, 0 otherwise. */
3660 static int check_keyboard_interaction(int64_t cur_time)
3663     static int64_t last_time;
3664     if (received_nb_signals)
3665         return AVERROR_EXIT;
3666     /* read_key() returns 0 on EOF */
3667     if(cur_time - last_time >= 100000 && !run_as_daemon){
3669         last_time = cur_time;
3673         return AVERROR_EXIT;
3674     if (key == '+') av_log_set_level(av_log_get_level()+10);
3675     if (key == '-') av_log_set_level(av_log_get_level()-10);
3676     if (key == 's') qp_hist     ^= 1;
3679             do_hex_dump = do_pkt_dump = 0;
3680         } else if(do_pkt_dump){
3684         av_log_set_level(AV_LOG_DEBUG);
     /* 'c': send command to the first matching filter; 'C': all filters */
3686     if (key == 'c' || key == 'C'){
3687         char buf[4096], target[64], command[256], arg[256] = {0};
3690         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
         /* read a line of input, echoing enabled around it */
3693         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3698         fprintf(stderr, "\n");
3700             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3701             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3702                    target, time, command, arg);
3703             for (i = 0; i < nb_filtergraphs; i++) {
3704                 FilterGraph *fg = filtergraphs[i];
                 /* time < 0 (or key 'c') sends immediately; otherwise queue */
3707                     ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3708                                                       key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3709                     fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3710                 } else if (key == 'c') {
3711                     fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3712                     ret = AVERROR_PATCHWELCOME;
3714                     ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3716                         fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3721             av_log(NULL, AV_LOG_ERROR,
3722                    "Parse error, at least 3 arguments were expected, "
3723                    "only %d given in string '%s'\n", n, buf);
     /* 'd'/'D': cycle or prompt for codec debug flags */
3726     if (key == 'd' || key == 'D'){
3729             debug = input_streams[0]->st->codec->debug<<1;
3730             if(!debug) debug = 1;
             /* skip modes that would crash (visualization needs a display) */
3731             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3738             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3743             fprintf(stderr, "\n");
3744             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3745                 fprintf(stderr,"error parsing debug value\n");
         /* propagate the chosen debug flags to every decoder and encoder */
3747         for(i=0;i<nb_input_streams;i++) {
3748             input_streams[i]->st->codec->debug = debug;
3750         for(i=0;i<nb_output_streams;i++) {
3751             OutputStream *ost = output_streams[i];
3752             ost->enc_ctx->debug = debug;
3754         if(debug) av_log_set_level(AV_LOG_DEBUG);
3755         fprintf(stderr,"debug=%d\n", debug);
3758         fprintf(stderr, "key    function\n"
3759                         "?      show this help\n"
3760                         "+      increase verbosity\n"
3761                         "-      decrease verbosity\n"
3762                         "c      Send command to first matching filter supporting it\n"
3763                         "C      Send/Queue command to all matching filters\n"
3764                         "D      cycle through available debug modes\n"
3765                         "h      dump packets/hex press to cycle through the 3 states\n"
3767                         "s      Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On a full
 * queue in non-blocking mode it warns and retries blocking; on read error
 * or send failure it propagates the error to the receiving side. */
3774 static void *input_thread(void *arg)
3777     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3782         ret = av_read_frame(f->ctx, &pkt);
3784         if (ret == AVERROR(EAGAIN)) {
         /* read error / EOF: signal the receiver and stop the thread */
3789             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3792         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3793         if (flags && ret == AVERROR(EAGAIN)) {
             /* queue full: fall back to a blocking send and suggest raising
              * -thread_queue_size */
3795             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3796             av_log(f->ctx, AV_LOG_WARNING,
3797                    "Thread message queue blocking; consider raising the "
3798                    "thread_queue_size option (current value: %d)\n",
3799                    f->thread_queue_size);
3802             if (ret != AVERROR_EOF)
3803                 av_log(f->ctx, AV_LOG_ERROR,
3804                        "Unable to send packet to main thread: %s\n",
3806             av_packet_unref(&pkt);
3807             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all input demuxer threads: mark the send side EOF, drain
 * and unref any queued packets, join the thread, free the queue. */
3815 static void free_input_threads(void)
3819     for (i = 0; i < nb_input_files; i++) {
3820         InputFile *f = input_files[i];
3823         if (!f || !f->in_thread_queue)
3825         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
         /* drain so the producer thread can finish its blocking send */
3826         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3827             av_packet_unref(&pkt);
3829         pthread_join(f->thread, NULL);
3831         av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread + message queue per input file (skipped when
 * there is only a single input). Returns 0 on success, <0 on error. */
3835 static int init_input_threads(void)
3839     if (nb_input_files == 1)
3842     for (i = 0; i < nb_input_files; i++) {
3843         InputFile *f = input_files[i];
         /* non-seekable non-lavfi inputs must not block the main loop */
3845         if (f->ctx->pb ? !f->ctx->pb->seekable :
3846             strcmp(f->ctx->iformat->name, "lavfi"))
3847             f->non_blocking = 1;
3848         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3849                                             f->thread_queue_size, sizeof(AVPacket));
3853         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3854             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3855             av_thread_message_queue_free(&f->in_thread_queue);
             /* pthread_create returns a positive errno value */
3856             return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file is flagged non_blocking. */
3862 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3864     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3866                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. Under -re rate emulation, return
 * EAGAIN while any stream's dts is still ahead of elapsed wall-clock time.
 * Uses the threaded queue when there are multiple input files. */
3870 static int get_input_packet(InputFile *f, AVPacket *pkt)
3874         for (i = 0; i < f->nb_streams; i++) {
3875             InputStream *ist = input_streams[f->ist_index + i];
             /* dts converted to microseconds vs. time since stream start */
3876             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3877             int64_t now = av_gettime_relative() - ist->start;
3879                 return AVERROR(EAGAIN);
3884     if (nb_input_files > 1)
3885         return get_input_packet_mt(f, pkt);
3887     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some input reported EAGAIN this iteration). */
3890 static int got_eagain(void)
3893     for (i = 0; i < nb_output_streams; i++)
3894         if (output_streams[i]->unavailable)
/* Clear the per-iteration EAGAIN markers on all input files and the
 * "unavailable" flag on all output streams. */
3899 static void reset_eagain(void)
3902     for (i = 0; i < nb_input_files; i++)
3903         input_files[i]->eagain = 0;
3904     for (i = 0; i < nb_output_streams; i++)
3905         output_streams[i]->unavailable = 0;
3908 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3909 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3910                                AVRational time_base)
3916         return tmp_time_base;
     /* compare across time bases; keep whichever value is larger */
3919     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3922         return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek, flush every
 * active decoder, and accumulate the file's total duration so timestamps
 * of the next loop iteration can be offset correctly. */
3928 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3931     AVCodecContext *avctx;
3932     int i, ret, has_audio = 0;
3933     int64_t duration = 0;
3935     ret = av_seek_frame(is, -1, is->start_time, 0);
3939     for (i = 0; i < ifile->nb_streams; i++) {
3940         ist   = input_streams[ifile->ist_index + i];
3941         avctx = ist->dec_ctx;
         /* flush decoders so no stale frames survive across the loop */
3944         if (ist->decoding_needed) {
3945             process_input_packet(ist, NULL, 1);
3946             avcodec_flush_buffers(avctx);
3949         /* duration is the length of the last frame in a stream
3950          * when audio stream is present we don't care about
3951          * last video frame length because it's not defined exactly */
3952         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3956     for (i = 0; i < ifile->nb_streams; i++) {
3957         ist   = input_streams[ifile->ist_index + i];
3958         avctx = ist->dec_ctx;
3961             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3962                 AVRational sample_rate = {1, avctx->sample_rate};
                 /* last-frame length derived from its sample count */
3964                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3968             if (ist->framerate.num) {
3969                 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3970             } else if (ist->st->avg_frame_rate.num) {
3971                 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
             /* no frame rate known: assume one time-base unit */
3972             } else duration = 1;
3974         if (!ifile->duration)
3975             ifile->time_base = ist->st->time_base;
3976         /* the total duration of the stream, max_pts - min_pts is
3977          * the duration of the stream without the last frame */
3978         duration += ist->max_pts - ist->min_pts;
3979         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
     /* positive loop counts are decremented; negative means loop forever */
3983     if (ifile->loop > 0)
3991  * - 0 -- one packet was read and processed
3992  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3993  *   this function should be called again
3994  * - AVERROR_EOF -- this function should not be called again
3996 static int process_input(int file_index)
3998     InputFile *ifile = input_files[file_index];
3999     AVFormatContext *is;
4007     ret = get_input_packet(ifile, &pkt);
4009     if (ret == AVERROR(EAGAIN)) {
     /* on read failure/EOF, restart the file if -stream_loop remains */
4013     if (ret < 0 && ifile->loop) {
4014         if ((ret = seek_to_start(ifile, is)) < 0)
4016             ret = get_input_packet(ifile, &pkt);
4017         if (ret == AVERROR(EAGAIN)) {
4023         if (ret != AVERROR_EOF) {
4024             print_error(is->filename, ret);
         /* EOF: flush decoders and finish streams not fed through lavfi */
4029         for (i = 0; i < ifile->nb_streams; i++) {
4030             ist = input_streams[ifile->ist_index + i];
4031             if (ist->decoding_needed) {
4032                 ret = process_input_packet(ist, NULL, 0);
4037             /* mark all outputs that don't go through lavfi as finished */
4038             for (j = 0; j < nb_output_streams; j++) {
4039                 OutputStream *ost = output_streams[j];
4041                 if (ost->source_index == ifile->ist_index + i &&
4042                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4043                     finish_output_stream(ost);
4047         ifile->eof_reached = 1;
         /* report EAGAIN so the main loop retries and notices EOF state */
4048         return AVERROR(EAGAIN);
4054         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4055                          is->streams[pkt.stream_index]);
4057     /* the following test is needed in case new streams appear
4058        dynamically in stream : we ignore them */
4059     if (pkt.stream_index >= ifile->nb_streams) {
4060         report_new_stream(file_index, &pkt);
4061         goto discard_packet;
4064     ist = input_streams[ifile->ist_index + pkt.stream_index];
4066     ist->data_size += pkt.size;
4070         goto discard_packet;
4072     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4073         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
     /* verbose timestamp trace of the raw demuxed packet */
4078         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4079                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4080                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4081                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4082                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4083                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4084                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4085                av_ts2str(input_files[ist->file_index]->ts_offset),
4086                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     /* one-time timestamp wrap correction for formats with <64-bit pts */
4089     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4090         int64_t stime, stime2;
4091         // Correcting starttime based on the enabled streams
4092         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4093         //       so we instead do it here as part of discontinuity handling
4094         if (   ist->next_dts == AV_NOPTS_VALUE
4095             && ifile->ts_offset == -is->start_time
4096             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4097             int64_t new_start_time = INT64_MAX;
4098             for (i=0; i<is->nb_streams; i++) {
4099                 AVStream *st = is->streams[i];
4100                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4102                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4104             if (new_start_time > is->start_time) {
4105                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4106                 ifile->ts_offset = -new_start_time;
4110         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4111         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4112         ist->wrap_correction_done = 1;
         /* unwrap dts/pts that already wrapped past the counter width */
4114         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4115             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4116             ist->wrap_correction_done = 0;
4118         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4119             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4120             ist->wrap_correction_done = 0;
4124     /* add the stream-global side data to the first packet */
4125     if (ist->nb_packets == 1) {
4126         if (ist->st->nb_side_data)
4127             av_packet_split_side_data(&pkt);
4128         for (i = 0; i < ist->st->nb_side_data; i++) {
4129             AVPacketSideData *src_sd = &ist->st->side_data[i];
             /* don't duplicate side data the packet already carries */
4132             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4134             if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4137             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4141             memcpy(dst_data, src_sd->data, src_sd->size);
     /* apply the accumulated file timestamp offset, then -itsscale */
4145     if (pkt.dts != AV_NOPTS_VALUE)
4146         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4147     if (pkt.pts != AV_NOPTS_VALUE)
4148         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4150     if (pkt.pts != AV_NOPTS_VALUE)
4151         pkt.pts *= ist->ts_scale;
4152     if (pkt.dts != AV_NOPTS_VALUE)
4153         pkt.dts *= ist->ts_scale;
4155     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* inter-stream discontinuity check against the file's last seen ts,
      * only on the first packet of a stream in discontinuous formats */
4156     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4157          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4158         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4159         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4160         int64_t delta   = pkt_dts - ifile->last_ts;
4161         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4162             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4163             ifile->ts_offset -= delta;
4164             av_log(NULL, AV_LOG_DEBUG,
4165                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4166                    delta, ifile->ts_offset);
4167             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4168             if (pkt.pts != AV_NOPTS_VALUE)
4169                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
     /* shift timestamps by the accumulated loop duration (-stream_loop) */
4173     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4174     if (pkt.pts != AV_NOPTS_VALUE) {
4175         pkt.pts += duration;
4176         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4177         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4180     if (pkt.dts != AV_NOPTS_VALUE)
4181         pkt.dts += duration;
4183     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
     /* per-stream discontinuity handling against the predicted next_dts */
4184     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4185          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4186         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4188         int64_t delta   = pkt_dts - ist->next_dts;
4189         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4190             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4191                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4192                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4193                 ifile->ts_offset -= delta;
4194                 av_log(NULL, AV_LOG_DEBUG,
4195                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4196                        delta, ifile->ts_offset);
4197                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4198                 if (pkt.pts != AV_NOPTS_VALUE)
4199                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
             /* non-discontinuous formats: drop wildly invalid timestamps */
4202             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4203                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4204                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4205                 pkt.dts = AV_NOPTS_VALUE;
4207             if (pkt.pts != AV_NOPTS_VALUE){
4208                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4209                 delta   = pkt_pts - ist->next_dts;
4210                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4211                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4212                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4213                     pkt.pts = AV_NOPTS_VALUE;
4219     if (pkt.dts != AV_NOPTS_VALUE)
4220         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4223         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4224                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4225                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4226                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4227                av_ts2str(input_files[ist->file_index]->ts_offset),
4228                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4231     sub2video_heartbeat(ist, pkt.pts);
4233     process_input_packet(ist, &pkt, 0);
4236     av_packet_unref(&pkt);
4242  * Perform a step of transcoding for the specified filter graph.
4244  * @param[in]  graph     filter graph to consider
4245  * @param[out] best_ist  input stream where a frame would allow to continue
4246  * @return  0 for success, <0 for error
4248 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4251     int nb_requests, nb_requests_max = 0;
4252     InputFilter *ifilter;
4256     ret = avfilter_graph_request_oldest(graph->graph);
4258         return reap_filters(0);
     /* graph fully flushed: drain filters and close its output streams */
4260     if (ret == AVERROR_EOF) {
4261         ret = reap_filters(1);
4262         for (i = 0; i < graph->nb_outputs; i++)
4263             close_output_stream(graph->outputs[i]->ost);
4266     if (ret != AVERROR(EAGAIN))
     /* graph starved: pick the input with the most failed src requests */
4269     for (i = 0; i < graph->nb_inputs; i++) {
4270         ifilter = graph->inputs[i];
4272         if (input_files[ist->file_index]->eagain ||
4273             input_files[ist->file_index]->eof_reached)
4275         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4276         if (nb_requests > nb_requests_max) {
4277             nb_requests_max = nb_requests;
     /* no feedable input: mark the graph's outputs unavailable for now */
4283     for (i = 0; i < graph->nb_outputs; i++)
4284         graph->outputs[i]->ost->unavailable = 1;
4290  * Run a single step of transcoding.
4292  * @return  0 for success, <0 for error
4294 static int transcode_step(void)
4300     ost = choose_output();
4307             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
     /* filtered output: let the filter graph pick which input to feed */
4312         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4317         av_assert0(ost->source_index >= 0);
4318         ist = input_streams[ost->source_index];
4321     ret = process_input(ist->file_index);
4322     if (ret == AVERROR(EAGAIN)) {
4323         if (input_files[ist->file_index]->eagain)
4324             ost->unavailable = 1;
     /* EOF of one input is not an error for the step as a whole */
4329         return ret == AVERROR_EOF ? 0 : ret;
4331     return reap_filters(0);
4335  * The following code is the main loop of the file converter
4337 static int transcode(void)
4340     AVFormatContext *os;
4343     int64_t timer_start;
4344     int64_t total_packets_written = 0;
4346     ret = transcode_init();
4350     if (stdin_interaction) {
4351         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4354     timer_start = av_gettime_relative();
4357     if ((ret = init_input_threads()) < 0)
     /* main loop: one transcode_step() per iteration until done or killed */
4361     while (!received_sigterm) {
4362         int64_t cur_time= av_gettime_relative();
4364         /* if 'q' pressed, exits */
4365         if (stdin_interaction)
4366             if (check_keyboard_interaction(cur_time) < 0)
4369         /* check if there's any stream where output is still needed */
4370         if (!need_output()) {
4371             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4375         ret = transcode_step();
4376         if (ret < 0 && ret != AVERROR_EOF) {
4378             av_strerror(ret, errbuf, sizeof(errbuf));
4380             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4384         /* dump report by using the output first video and audio streams */
4385         print_report(0, timer_start, cur_time);
4388     free_input_threads();
4391     /* at the end of stream, we must flush the decoder buffers */
4392     for (i = 0; i < nb_input_streams; i++) {
4393         ist = input_streams[i];
4394         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4395             process_input_packet(ist, NULL, 0);
4402     /* write the trailer if needed and close file */
4403     for (i = 0; i < nb_output_files; i++) {
4404         os = output_files[i]->ctx;
4405         if (!output_files[i]->header_written) {
4406             av_log(NULL, AV_LOG_ERROR,
4407                    "Nothing was written into output file %d (%s), because "
4408                    "at least one of its streams received no packets.\n",
4412         if ((ret = av_write_trailer(os)) < 0) {
4413             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4419     /* dump report by using the first video and audio streams */
4420     print_report(1, timer_start, av_gettime_relative());
4422     /* close each encoder */
4423     for (i = 0; i < nb_output_streams; i++) {
4424         ost = output_streams[i];
4425         if (ost->encoding_needed) {
4426             av_freep(&ost->enc_ctx->stats_in);
4428         total_packets_written += ost->packets_written;
     /* -abort_on empty_output: fail hard if nothing was ever written */
4431     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4432         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4436     /* close each decoder */
4437     for (i = 0; i < nb_input_streams; i++) {
4438         ist = input_streams[i];
4439         if (ist->decoding_needed) {
4440             avcodec_close(ist->dec_ctx);
4441             if (ist->hwaccel_uninit)
4442                 ist->hwaccel_uninit(ist->dec_ctx);
4446     av_buffer_unref(&hw_device_ctx);
     /* error path also stops threads and frees per-stream allocations */
4453     free_input_threads();
4456     if (output_streams) {
4457         for (i = 0; i < nb_output_streams; i++) {
4458             ost = output_streams[i];
4461                 if (fclose(ost->logfile))
4462                     av_log(NULL, AV_LOG_ERROR,
4463                            "Error closing logfile, loss of information possible: %s\n",
4464                            av_err2str(AVERROR(errno)));
4465                 ost->logfile = NULL;
4467             av_freep(&ost->forced_kf_pts);
4468             av_freep(&ost->apad);
4469             av_freep(&ost->disposition);
4470             av_dict_free(&ost->encoder_opts);
4471             av_dict_free(&ost->sws_dict);
4472             av_dict_free(&ost->swr_opts);
4473             av_dict_free(&ost->resample_opts);
/* User CPU time consumed by this process, in microseconds.
 * Uses getrusage, GetProcessTimes, or wall clock as a last resort. */
4481 static int64_t getutime(void)
4484     struct rusage rusage;
4486     getrusage(RUSAGE_SELF, &rusage);
4487     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4488 #elif HAVE_GETPROCESSTIMES
4490     FILETIME c, e, k, u;
4491     proc = GetCurrentProcess();
4492     GetProcessTimes(proc, &c, &e, &k, &u);
     /* FILETIME is in 100ns units; divide by 10 for microseconds */
4493     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4495     return av_gettime_relative();
/* Peak memory usage of this process in bytes (0 when unsupported).
 * ru_maxrss is reported in KiB, hence the *1024; on Windows the
 * PeakPagefileUsage counter is used instead. */
4499 static int64_t getmaxrss(void)
4501 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4502     struct rusage rusage;
4503     getrusage(RUSAGE_SELF, &rusage);
4504     return (int64_t)rusage.ru_maxrss * 1024;
4505 #elif HAVE_GETPROCESSMEMORYINFO
4507     PROCESS_MEMORY_COUNTERS memcounters;
4508     proc = GetCurrentProcess();
4509     memcounters.cb = sizeof(memcounters);
4510     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4511     return memcounters.PeakPagefileUsage;
/* Log callback that discards all messages; installed by main() when
 * running with -d (daemon mode) so nothing is written to stderr. */
4517 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register components, parse the command line,
 * run transcode(), report benchmarks, and exit with the proper code. */
4521 int main(int argc, char **argv)
4528     register_exit(ffmpeg_cleanup);
4530     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4532     av_log_set_flags(AV_LOG_SKIP_REPEATED);
4533     parse_loglevel(argc, argv, options);
     /* -d as the first argument: daemon mode, silence all logging */
4535     if(argc>1 && !strcmp(argv[1], "-d")){
4537         av_log_set_callback(log_callback_null);
     /* register all codecs, devices, filters and (de)muxers */
4542     avcodec_register_all();
4544     avdevice_register_all();
4546     avfilter_register_all();
4548     avformat_network_init();
4550     show_banner(argc, argv, options);
4552     /* parse options and open all input/output files */
4553     ret = ffmpeg_parse_options(argc, argv);
4557     if (nb_output_files <= 0 && nb_input_files == 0) {
4559         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4563     /* file converter / grab */
4564     if (nb_output_files <= 0) {
4565         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4569 //     if (nb_input_files == 0) {
4570 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4574     for (i = 0; i < nb_output_files; i++) {
4575         if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
     /* measure user CPU time around the whole transcode for -benchmark */
4579     current_time = ti = getutime();
4580     if (transcode() < 0)
4582     ti = getutime() - ti;
4584         av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4586     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4587            decode_error_stat[0], decode_error_stat[1]);
     /* fail if the decode error ratio exceeds -max_error_rate */
4588     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
     /* 255 signals termination by user interrupt */
4591     exit_program(received_nb_signals ? 255 : main_return_code);
4592     return main_return_code;