2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Lazily-opened file for per-frame video statistics (-vstats); see do_video_stats(). */
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Frame duplication / drop accounting for the video sync code. */
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle output; allocated on demand in do_subtitle_out(). */
138 static uint8_t *subtitle_out;
/* Global stream/file tables shared with ffmpeg_opt.c / ffmpeg_filter.c. */
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty is set. */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a blank RGB32 canvas.
 * Dimensions come from the decoder context when known, otherwise from the
 * configured sub2video fallback size (ist->sub2video.w/h).
 * Returns <0 on buffer-allocation failure.
 * NOTE(review): this listing is an elided sample; lines between the numbered
 * statements are missing from view. */
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the first plane to zero: fully transparent in RGB32 (alpha == 0). */
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into an RGB32 canvas.
 * dst/dst_linesize describe the canvas of size w x h; non-bitmap rectangles
 * and rectangles that overflow the canvas are warned about and skipped. */
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: reject rectangles that would write outside the canvas. */
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
202 dst += r->y * dst_linesize + r->x * 4;
/* data[1] holds the palette: one 32-bit color per palette index. */
204 pal = (uint32_t *)r->data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
/* Palette lookup for each source pixel index. */
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->linesize[0];
/* Push the current sub2video frame (stamped with pts) into every filtergraph
 * input fed by this stream. KEEP_REF keeps ownership of the frame here so the
 * same canvas can be re-sent by the heartbeat mechanism. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it to the filters.
 * When sub is NULL (heartbeat/flush path) the elided branch reuses the stored
 * end_pts as the new pts — NOTE(review): the NULL-handling lines are elided
 * from this sample; confirm against the full source. */
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
/* Display times are in ms relative to sub->pts (which is in AV_TIME_BASE);
 * rescale both endpoints into the input stream's time base. */
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Impossible to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
/* Composite every rectangle, then hand the canvas to the filtergraphs. */
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame on every sibling subtitle stream of the
 * file that ist belongs to, so filters (e.g. overlay) waiting on a subtitle
 * input do not stall while video frames accumulate. */
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
274 if (!ist2->sub2video.frame)
276 /* subtitles seem to be usually muxed ahead of other streams;
277 if not, subtracting a larger time here is necessary */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
/* Re-render (blank or expired) canvas before pushing if needed. */
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
/* Only push when some buffersrc actually failed a request (is starved). */
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit a final update if the
 * last subtitle had a bounded end time, then send NULL (EOF) to each
 * buffersrc feeding this stream's filtergraphs. */
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put the tty back into the state
 * saved in oldtty. Called from signal context, so it must avoid non-signal-
 * safe functions (the AV_LOG_QUIET av_log below flushes the "last line"). */
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Shared signal/exit state. volatile because these are written from the
 * signal handler and read from the main loop.
 * NOTE(review): plain volatile int is not guaranteed atomic for cross-thread
 * use; this matches the upstream code as-is. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
/* POSIX signal handler: record the signal and, after more than three signals,
 * write a message straight to stderr and hard-exit (elided below). Only
 * async-signal-safe calls (write) are used here. */
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used for POSIX signals. */
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this method the
354 process is hard terminated, so stall as long as we need to
355 to try and let the main thread(s) clean up and gracefully terminate
356 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait until the main thread sets ffmpeg_exited. */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Fragment of term_init() — the function header is elided from this sample.
 * Puts the controlling terminal into a raw-ish mode (no echo, no canonical
 * input) so single keypresses can be read, then installs signal handlers. */
372 if (!run_as_daemon && stdin_interaction) {
374 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control and echo/canonical processing. */
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379 |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU time limit exceeded — treat like a termination request. */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
/* Returns the pressed key, or a negative/zero value when no input is pending.
 * POSIX path uses select() on stdin; Windows path peeks the console/pipe.
 * NOTE(review): most of the body is elided from this sample. */
404 static int read_key(void)
416 n = select(1, &rfds, NULL, NULL, &tv);
425 # if HAVE_PEEKNAMEDPIPE
427 static HANDLE input_handle;
430 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes; use that to detect redirected stdin. */
431 is_pipe = !GetConsoleMode(input_handle, &dw);
435 /* When running under a GUI, you will end here. */
436 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal arrives.
 * Before transcode init completes (transcode_init_done == 0) a single signal
 * interrupts; afterwards it takes more signals than init transitions. */
455 static int decode_interrupt_cb(void *ctx)
457 return received_nb_signals > transcode_init_done;
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free every filtergraph, output file/stream, input
 * file/stream and associated buffers, close the vstats file, shut down the
 * network layer, and log the exit reason. Registered as the exit handler;
 * `ret` is the pending process exit status. */
462 static void ffmpeg_cleanup(int ret)
467 int maxrss = getmaxrss() / 1024;
468 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then per-input/per-output metadata --- */
471 for (i = 0; i < nb_filtergraphs; i++) {
472 FilterGraph *fg = filtergraphs[i];
473 avfilter_graph_free(&fg->graph);
474 for (j = 0; j < fg->nb_inputs; j++) {
475 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476 av_freep(&fg->inputs[j]->name);
477 av_freep(&fg->inputs[j]);
479 av_freep(&fg->inputs);
480 for (j = 0; j < fg->nb_outputs; j++) {
481 av_freep(&fg->outputs[j]->name);
482 av_freep(&fg->outputs[j]);
484 av_freep(&fg->outputs);
485 av_freep(&fg->graph_desc);
487 av_freep(&filtergraphs[i]);
489 av_freep(&filtergraphs);
491 av_freep(&subtitle_out);
/* --- output files: close I/O (unless AVFMT_NOFILE) and free contexts --- */
494 for (i = 0; i < nb_output_files; i++) {
495 OutputFile *of = output_files[i];
500 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
502 avformat_free_context(s);
503 av_dict_free(&of->opts);
505 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, frames, encoder state, queues --- */
507 for (i = 0; i < nb_output_streams; i++) {
508 OutputStream *ost = output_streams[i];
513 for (j = 0; j < ost->nb_bitstream_filters; j++)
514 av_bsf_free(&ost->bsf_ctx[j]);
515 av_freep(&ost->bsf_ctx);
516 av_freep(&ost->bsf_extradata_updated);
518 av_frame_free(&ost->filtered_frame);
519 av_frame_free(&ost->last_frame);
520 av_dict_free(&ost->encoder_opts);
522 av_parser_close(ost->parser);
523 avcodec_free_context(&ost->parser_avctx);
525 av_freep(&ost->forced_keyframes);
526 av_expr_free(ost->forced_keyframes_pexpr);
527 av_freep(&ost->avfilter);
528 av_freep(&ost->logfile_prefix);
530 av_freep(&ost->audio_channels_map);
531 ost->audio_channels_mapped = 0;
533 av_dict_free(&ost->sws_dict);
535 avcodec_free_context(&ost->enc_ctx);
536 avcodec_parameters_free(&ost->ref_par);
/* Drain and unref any packets still buffered for muxing. */
538 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
540 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
541 av_packet_unref(&pkt);
543 av_fifo_freep(&ost->muxing_queue);
545 av_freep(&output_streams[i]);
548 free_input_threads();
/* --- input files and streams --- */
550 for (i = 0; i < nb_input_files; i++) {
551 avformat_close_input(&input_files[i]->ctx);
552 av_freep(&input_files[i]);
554 for (i = 0; i < nb_input_streams; i++) {
555 InputStream *ist = input_streams[i];
557 av_frame_free(&ist->decoded_frame);
558 av_frame_free(&ist->filter_frame);
559 av_dict_free(&ist->decoder_opts);
560 avsubtitle_free(&ist->prev_sub.subtitle);
561 av_frame_free(&ist->sub2video.frame);
562 av_freep(&ist->filters);
563 av_freep(&ist->hwaccel_device);
564 av_freep(&ist->dts_buffer);
566 avcodec_free_context(&ist->dec_ctx);
568 av_freep(&input_streams[i]);
/* fclose return is checked: a failed flush could lose buffered stats. */
572 if (fclose(vstats_file))
573 av_log(NULL, AV_LOG_ERROR,
574 "Error closing vstats file, loss of information possible: %s\n",
575 av_err2str(AVERROR(errno)));
577 av_freep(&vstats_filename);
579 av_freep(&input_streams);
580 av_freep(&input_files);
581 av_freep(&output_streams);
582 av_freep(&output_files);
586 avformat_network_deinit();
588 if (received_sigterm) {
589 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
590 (int) received_sigterm);
591 } else if (ret && transcode_init_done) {
592 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-sensitive
 * match). Used to strip options that were already consumed. */
598 void remove_avoptions(AVDictionary **a, AVDictionary *b)
600 AVDictionaryEntry *t = NULL;
602 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
603 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with AV_LOG_FATAL if any option remains unconsumed in m — an entry
 * left in the dictionary means the user supplied an option nobody accepted. */
607 void assert_avoptions(AVDictionary *m)
609 AVDictionaryEntry *t;
610 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
611 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort helper for experimental codecs; body elided from this sample.
 * `encoder` distinguishes encode vs decode direction for the message. */
616 static void abort_codec_experimental(AVCodec *c, int encoder)
/* -benchmark_all support: when fmt is non-NULL, log the user CPU time spent
 * since the previous call under the formatted label; always refresh the
 * reference timestamp (elided). A NULL fmt just resets the baseline. */
621 static void update_benchmark(const char *fmt, ...)
623 if (do_benchmark_all) {
624 int64_t t = getutime();
630 vsnprintf(buf, sizeof(buf), fmt, va);
632 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `this_stream` flags are applied to ost
 * itself, `others` to all remaining streams (e.g. after a fatal mux error). */
638 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
641 for (i = 0; i < nb_output_streams; i++) {
642 OutputStream *ost2 = output_streams[i];
643 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer for stream ost, after timestamp sanitation.
 * If the muxer header is not yet written, the packet is queued in
 * ost->muxing_queue instead (growing the FIFO up to max_muxing_queue_size).
 * Takes ownership of pkt's payload (moved into the queue or unreffed). */
647 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
649 AVFormatContext *s = of->ctx;
650 AVStream *st = ost->st;
653 if (!of->header_written) {
655 /* the muxer is not initialized yet, buffer the packet */
656 if (!av_fifo_space(ost->muxing_queue)) {
/* Double the queue, capped at max_muxing_queue_size; error out if full. */
657 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
658 ost->max_muxing_queue_size);
659 if (new_size <= av_fifo_size(ost->muxing_queue)) {
660 av_log(NULL, AV_LOG_ERROR,
661 "Too many packets buffered for output stream %d:%d.\n",
662 ost->file_index, ost->st->index);
665 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
669 av_packet_move_ref(&tmp_pkt, pkt);
670 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* VSYNC_DROP (video) / negative async (audio) discard timestamps entirely. */
674 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
675 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
676 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
679 * Audio encoders may split the packets -- #frames in != #packets out.
680 * But there is no reordering, so we can limit the number of output packets
681 * by simply dropping them here.
682 * Counting encoded video frames needs to be done separately because of
683 * reordering, see do_video_out()
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
/* Extract encoder quality/PSNR side data for stats display. */
692 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
694 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
696 ost->quality = sd ? AV_RL32(sd) : -1;
697 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
699 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
701 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: force duration to one frame interval. */
706 if (ost->frame_rate.num && ost->is_cfr) {
707 if (pkt->duration > 0)
708 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
709 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* Timestamp sanitation: dts must not exceed pts and must be monotonic. */
714 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
715 if (pkt->dts != AV_NOPTS_VALUE &&
716 pkt->pts != AV_NOPTS_VALUE &&
717 pkt->dts > pkt->pts) {
718 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
720 ost->file_index, ost->st->index);
/* Median of {pts, dts, last_mux_dts + 1}: sum minus min minus max. */
722 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
723 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
724 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* VP9 stream copy is exempt: it legitimately has non-monotonic dts. */
726 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
727 pkt->dts != AV_NOPTS_VALUE &&
728 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
729 ost->last_mux_dts != AV_NOPTS_VALUE) {
730 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
731 if (pkt->dts < max) {
732 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
733 av_log(s, loglevel, "Non-monotonous DTS in output stream "
734 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
735 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
737 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
740 av_log(s, loglevel, "changing to %"PRId64". This may result "
741 "in incorrect timestamps in the output file.\n",
743 if (pkt->pts >= pkt->dts)
744 pkt->pts = FFMAX(pkt->pts, max);
749 ost->last_mux_dts = pkt->dts;
751 ost->data_size += pkt->size;
752 ost->packets_written++;
754 pkt->stream_index = ost->index;
757 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
758 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
759 av_get_media_type_string(ost->enc_ctx->codec_type),
760 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
761 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* On mux failure: record error, finish all streams, keep going. */
766 ret = av_interleaved_write_frame(s, pkt);
768 print_error("av_interleaved_write_frame()", ret);
769 main_return_code = 1;
770 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
772 av_packet_unref(pkt);
/* Mark ost's encoder finished and clamp the owning file's recording time to
 * the stream's current end, so other streams stop at the same point. */
775 static void close_output_stream(OutputStream *ost)
777 OutputFile *of = output_files[ost->file_index];
779 ost->finished |= ENCODER_FINISHED;
781 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
782 of->recording_time = FFMIN(of->recording_time, end);
/* Run pkt through the stream's bitstream-filter chain (if any) and pass each
 * resulting packet to write_packet(); without filters, write directly. */
786 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
790 /* apply the output bitstream filters, if any */
791 if (ost->nb_bitstream_filters) {
794 av_packet_split_side_data(pkt);
/* Feed the first filter; the elided loop below drains the chain. */
795 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
801 /* get a packet from the previous filter up the chain */
802 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
803 if (ret == AVERROR(EAGAIN)) {
809 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
810 * the api states this shouldn't happen after init(). Propagate it here to the
811 * muxer and to the next filters in the chain to workaround this.
812 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
813 * par_out->extradata and adapt muxers accordingly to get rid of this. */
814 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
815 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
818 ost->bsf_extradata_updated[idx - 1] |= 1;
821 /* send it to the next filter down the chain or to the muxer */
822 if (idx < ost->nb_bitstream_filters) {
823 /* HACK/FIXME! - See above */
824 if (!(ost->bsf_extradata_updated[idx] & 2)) {
825 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
828 ost->bsf_extradata_updated[idx] |= 2;
830 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
835 write_packet(of, pkt, ost);
/* No bitstream filters: pass the packet straight to the muxer. */
838 write_packet(of, pkt, ost);
/* EOF from the filter chain is the normal drain terminator, not an error. */
841 if (ret < 0 && ret != AVERROR_EOF) {
842 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
843 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Returns 0 (and closes the stream) once the stream's current position has
 * reached the output file's -t recording limit; non-zero while still inside
 * the limit (return lines elided from this sample). */
849 static int check_recording_time(OutputStream *ost)
851 OutputFile *of = output_files[ost->file_index];
853 if (of->recording_time != INT64_MAX &&
854 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
855 AV_TIME_BASE_Q) >= 0) {
856 close_output_stream(ost);
/* Encode one audio frame with the send_frame/receive_packet API and mux each
 * produced packet via output_packet(). Also maintains ost->sync_opts (in
 * samples) and the encoded-frame counters used for stats. */
862 static void do_audio_out(OutputFile *of, OutputStream *ost,
865 AVCodecContext *enc = ost->enc_ctx;
869 av_init_packet(&pkt);
/* Stop encoding once -t recording time is reached. */
873 if (!check_recording_time(ost))
/* Without usable pts (or with async disabled) stamp from the sample clock. */
876 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
877 frame->pts = ost->sync_opts;
878 ost->sync_opts = frame->pts + frame->nb_samples;
879 ost->samples_encoded += frame->nb_samples;
880 ost->frames_encoded++;
882 av_assert0(pkt.size || !pkt.data);
883 update_benchmark(NULL);
885 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
886 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
887 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
888 enc->time_base.num, enc->time_base.den);
891 ret = avcodec_send_frame(enc, frame);
/* Drain loop: EAGAIN means the encoder needs more input. */
896 ret = avcodec_receive_packet(enc, &pkt);
897 if (ret == AVERROR(EAGAIN))
902 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Rescale packet timestamps from encoder to stream time base before muxing. */
904 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
907 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
908 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
909 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
910 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
913 output_packet(of, &pkt, ost);
918 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode an AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (one packet draws, one clears); display times are normalized so that
 * start_display_time is 0 and check_recording_time() works with -t. */
922 static void do_subtitle_out(OutputFile *of,
926 int subtitle_out_max_size = 1024 * 1024;
927 int subtitle_out_size, nb, i;
932 if (sub->pts == AV_NOPTS_VALUE) {
933 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in ffmpeg_cleanup). */
942 subtitle_out = av_malloc(subtitle_out_max_size);
944 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
949 /* Note: DVB subtitle need one packet to draw them and one other
950 packet to clear them */
951 /* XXX: signal it in the codec context ? */
952 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
957 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
959 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
960 pts -= output_files[ost->file_index]->start_time;
961 for (i = 0; i < nb; i++) {
/* num_rects is temporarily changed for the DVB "clear" pass; save it. */
962 unsigned save_num_rects = sub->num_rects;
964 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
965 if (!check_recording_time(ost))
969 // start_display_time is required to be 0
970 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
971 sub->end_display_time -= sub->start_display_time;
972 sub->start_display_time = 0;
976 ost->frames_encoded++;
978 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
979 subtitle_out_max_size, sub);
981 sub->num_rects = save_num_rects;
982 if (subtitle_out_size < 0) {
983 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
987 av_init_packet(&pkt);
988 pkt.data = subtitle_out;
989 pkt.size = subtitle_out_size;
990 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
991 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
992 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
993 /* XXX: the pts correction is handled here. Maybe handling
994 it in the codec would be better */
/* 90 = 90 kHz MPEG-TS clock ticks per millisecond of display time. */
996 pkt.pts += 90 * sub->start_display_time;
998 pkt.pts += 90 * sub->end_display_time;
1001 output_packet(of, &pkt, ost);
/* Encode one filtered video frame, applying the video sync policy
 * (CFR/VFR/VSCFR/passthrough/drop): duplicates or drops frames so output
 * timestamps match the target rate, handles forced keyframes, interlacing
 * flags, the legacy rawpicture path, and two-pass log output.
 * NOTE(review): heavily elided sample — intervening lines are missing. */
1005 static void do_video_out(OutputFile *of,
1007 AVFrame *next_picture,
1010 int ret, format_video_sync;
1012 AVCodecContext *enc = ost->enc_ctx;
1013 AVCodecParameters *mux_par = ost->st->codecpar;
1014 int nb_frames, nb0_frames, i;
1015 double delta, delta0;
1016 double duration = 0;
1018 InputStream *ist = NULL;
1019 AVFilterContext *filter = ost->filter->filter;
1021 if (ost->source_index >= 0)
1022 ist = input_streams[ost->source_index];
/* Estimate the frame duration (in encoder time-base units) from the filter
 * output frame rate, the forced output rate, or the input packet duration. */
1024 if (filter->inputs[0]->frame_rate.num > 0 &&
1025 filter->inputs[0]->frame_rate.den > 0)
1026 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1028 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1029 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1031 if (!ost->filters_script &&
1035 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1036 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush (no next frame): predict dup count from the recent history. */
1039 if (!next_picture) {
1041 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1042 ost->last_nb0_frames[1],
1043 ost->last_nb0_frames[2]);
1045 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1046 delta = delta0 + duration;
1048 /* by default, we output a single frame */
1049 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO from the output format's capabilities. */
1052 format_video_sync = video_sync_method;
1053 if (format_video_sync == VSYNC_AUTO) {
1054 if(!strcmp(of->ctx->oformat->name, "avi")) {
1055 format_video_sync = VSYNC_VFR;
1057 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1059 && format_video_sync == VSYNC_CFR
1060 && input_files[ist->file_index]->ctx->nb_streams == 1
1061 && input_files[ist->file_index]->input_ts_offset == 0) {
1062 format_video_sync = VSYNC_VSCFR;
1064 if (format_video_sync == VSYNC_CFR && copy_ts) {
1065 format_video_sync = VSYNC_VSCFR;
1068 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift: clip the frame's position rather than going backwards. */
1072 format_video_sync != VSYNC_PASSTHROUGH &&
1073 format_video_sync != VSYNC_DROP) {
1074 if (delta0 < -0.6) {
1075 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1077 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1078 sync_ipts = ost->sync_opts;
/* Per-policy dup/drop decision. */
1083 switch (format_video_sync) {
1085 if (ost->frame_number == 0 && delta0 >= 0.5) {
1086 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1089 ost->sync_opts = lrint(sync_ipts);
1092 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1093 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1095 } else if (delta < -1.1)
1097 else if (delta > 1.1) {
1098 nb_frames = lrintf(delta);
1100 nb0_frames = lrintf(delta0 - 0.6);
1106 else if (delta > 0.6)
1107 ost->sync_opts = lrint(sync_ipts);
1110 case VSYNC_PASSTHROUGH:
1111 ost->sync_opts = lrint(sync_ipts);
/* Clamp to -frames limit and record dup history for the flush predictor. */
1118 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1119 nb0_frames = FFMIN(nb0_frames, nb_frames);
1121 memmove(ost->last_nb0_frames + 1,
1122 ost->last_nb0_frames,
1123 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1124 ost->last_nb0_frames[0] = nb0_frames;
1126 if (nb0_frames == 0 && ost->last_dropped) {
1128 av_log(NULL, AV_LOG_VERBOSE,
1129 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1130 ost->frame_number, ost->st->index, ost->last_frame->pts);
1132 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
/* Refuse absurd duplication counts (bad dts); skip the frame instead. */
1133 if (nb_frames > dts_error_threshold * 30) {
1134 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1138 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1139 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1140 if (nb_frames_dup > dup_warning) {
1141 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1145 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1147 /* duplicates frame if needed */
1148 for (i = 0; i < nb_frames; i++) {
1149 AVFrame *in_picture;
1150 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the PREVIOUS frame. */
1154 if (i < nb0_frames && ost->last_frame) {
1155 in_picture = ost->last_frame;
1157 in_picture = next_picture;
1162 in_picture->pts = ost->sync_opts;
1165 if (!check_recording_time(ost))
1167 if (ost->frame_number >= ost->max_frames)
/* Legacy raw-picture path: wrap the AVFrame pointer itself in a packet. */
1171 #if FF_API_LAVF_FMT_RAWPICTURE
1172 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1173 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1174 /* raw pictures are written as AVPicture structure to
1175 avoid any copies. We support temporarily the older
1177 if (in_picture->interlaced_frame)
1178 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1180 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1181 pkt.data = (uint8_t *)in_picture;
1182 pkt.size = sizeof(AVPicture);
1183 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1184 pkt.flags |= AV_PKT_FLAG_KEY;
1186 output_packet(of, &pkt, ost);
1190 int forced_keyframe = 0;
/* Propagate interlacing configuration into frame and mux parameters. */
1193 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1194 ost->top_field_first >= 0)
1195 in_picture->top_field_first = !!ost->top_field_first;
1197 if (in_picture->interlaced_frame) {
1198 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1199 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1201 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1203 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1205 in_picture->quality = enc->global_quality;
1206 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, -force_key_frames expression,
 * or "source" (mirror the input's keyframe flags). */
1208 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1209 in_picture->pts * av_q2d(enc->time_base) : NAN;
1210 if (ost->forced_kf_index < ost->forced_kf_count &&
1211 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1212 ost->forced_kf_index++;
1213 forced_keyframe = 1;
1214 } else if (ost->forced_keyframes_pexpr) {
1216 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1217 res = av_expr_eval(ost->forced_keyframes_pexpr,
1218 ost->forced_keyframes_expr_const_values, NULL);
1219 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1220 ost->forced_keyframes_expr_const_values[FKF_N],
1221 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1222 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1223 ost->forced_keyframes_expr_const_values[FKF_T],
1224 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1227 forced_keyframe = 1;
1228 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1229 ost->forced_keyframes_expr_const_values[FKF_N];
1230 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1231 ost->forced_keyframes_expr_const_values[FKF_T];
1232 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1235 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1236 } else if ( ost->forced_keyframes
1237 && !strncmp(ost->forced_keyframes, "source", 6)
1238 && in_picture->key_frame==1) {
1239 forced_keyframe = 1;
1242 if (forced_keyframe) {
1243 in_picture->pict_type = AV_PICTURE_TYPE_I;
1244 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1247 update_benchmark(NULL);
1249 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1250 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1251 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1252 enc->time_base.num, enc->time_base.den);
1255 ost->frames_encoded++;
/* send_frame/receive_packet encode loop; EAGAIN ends the drain. */
1257 ret = avcodec_send_frame(enc, in_picture);
1262 ret = avcodec_receive_packet(enc, &pkt);
1263 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1264 if (ret == AVERROR(EAGAIN))
1270 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1271 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1272 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1273 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* No pts from a no-delay encoder: stamp from the sync counter. */
1276 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1277 pkt.pts = ost->sync_opts;
1279 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1282 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1283 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1284 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1285 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1288 frame_size = pkt.size;
1289 output_packet(of, &pkt, ost);
1291 /* if two pass, output log */
1292 if (ost->logfile && enc->stats_out) {
1293 fprintf(ost->logfile, "%s", enc->stats_out);
1299 * For video, number of frames in == number of packets out.
1300 * But there may be reordering, so we can't throw away frames on encoder
1301 * flush, we need to limit them here, before they go into encoder.
1303 ost->frame_number++;
1305 if (vstats_filename && frame_size)
1306 do_video_stats(ost, frame_size);
/* Keep a ref to the last frame so it can be duplicated next call. */
1309 if (!ost->last_frame)
1310 ost->last_frame = av_frame_alloc();
1311 av_frame_unref(ost->last_frame);
1312 if (next_picture && ost->last_frame)
1313 av_frame_ref(ost->last_frame, next_picture);
1315 av_frame_free(&ost->last_frame);
1319 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1323 static double psnr(double d)
1325 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for 'ost' to the global -vstats
 * file: frame number, quantizer, optional PSNR, encoded frame size, end
 * timestamp, and instantaneous/average bitrate.
 * NOTE(review): several lines are missing from this excerpt (e.g. the
 * assignment of 'enc' and the fopen() error path) — verify against the
 * full source before relying on details not shown here.
 */
1328 static void do_video_stats(OutputStream *ost, int frame_size)
1330 AVCodecContext *enc;
1332 double ti1, bitrate, avg_bitrate;
/* Lazily open the stats file on first use (guard not visible in excerpt). */
1334 /* this is executed just the first time do_video_stats is called */
1336 vstats_file = fopen(vstats_filename, "w");
1344 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1345 frame_number = ost->st->nb_frames;
/* Quantizer is stored scaled by FF_QP2LAMBDA; convert back for display. */
1346 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1347 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to compute error (AV_CODEC_FLAG_PSNR). */
1349 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1350 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1352 fprintf(vstats_file,"f_size= %6d ", frame_size);
1353 /* compute pts value */
1354 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time_base tick. */
1358 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1359 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1360 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1361 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1362 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/*
 * Mark 'ost' — and every other output stream of the same output file —
 * as finished for both the encoder and the muxer, so no further packets
 * are produced or written for that file.
 */
1366 static void finish_output_stream(OutputStream *ost)
1368 OutputFile *of = output_files[ost->file_index];
1371 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* Finishing one stream finishes all sibling streams of the output file. */
1374 for (i = 0; i < of->ctx->nb_streams; i++)
1375 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1380 * Get and encode new output from any of the filtergraphs, without causing
1383 * @return 0 for success, <0 for severe errors
/*
 * Drain every output stream's buffersink and feed the frames to the
 * audio/video encoders. With 'flush' set, an EOF from the sink is turned
 * into a NULL frame for do_video_out() to flush the fps logic.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): loop/brace lines are missing from this excerpt; the
 * per-frame inner loop boundaries are not visible here.
 */
1385 static int reap_filters(int flush)
1387 AVFrame *filtered_frame = NULL;
1390 /* Reap all buffers present in the buffer sinks */
1391 for (i = 0; i < nb_output_streams; i++) {
1392 OutputStream *ost = output_streams[i];
1393 OutputFile *of = output_files[ost->file_index];
1394 AVFilterContext *filter;
1395 AVCodecContext *enc = ost->enc_ctx;
1400 filter = ost->filter->filter;
/* Lazily allocate the reusable frame that receives filter output. */
1402 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1403 return AVERROR(ENOMEM);
1405 filtered_frame = ost->filtered_frame;
1408 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already queued; do not run the graph. */
1409 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1410 AV_BUFFERSINK_FLAG_NO_REQUEST);
1412 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1413 av_log(NULL, AV_LOG_WARNING,
1414 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1415 } else if (flush && ret == AVERROR_EOF) {
1416 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1417 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
/* Drop frames for streams that already finished. */
1421 if (ost->finished) {
1422 av_frame_unref(filtered_frame);
1425 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1426 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1427 AVRational tb = enc->time_base;
/* Temporarily scale up the time base denominator to compute a
 * higher-precision floating-point pts for the fps code. */
1428 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1430 tb.den <<= extra_bits;
1432 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1433 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1434 float_pts /= 1 << extra_bits;
1435 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1436 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1438 filtered_frame->pts =
1439 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1440 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1442 //if (ost->source_index >= 0)
1443 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1445 switch (filter->inputs[0]->type) {
1446 case AVMEDIA_TYPE_VIDEO:
1447 if (!ost->frame_aspect_ratio.num)
1448 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1451 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1452 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1454 enc->time_base.num, enc->time_base.den);
1457 do_video_out(of, ost, filtered_frame, float_pts);
1459 case AVMEDIA_TYPE_AUDIO:
/* The encoder cannot follow channel-count changes mid-stream unless it
 * advertises AV_CODEC_CAP_PARAM_CHANGE. */
1460 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1461 enc->channels != av_frame_get_channels(filtered_frame)) {
1462 av_log(NULL, AV_LOG_ERROR,
1463 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1466 do_audio_out(of, ost, filtered_frame);
1469 // TODO support subtitle filters
1473 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then detailed per-input/per-output stream
 * packet and frame counts at VERBOSE level. Warns when nothing was
 * encoded at all.
 */
1480 static void print_final_stats(int64_t total_size)
1482 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1483 uint64_t subtitle_size = 0;
1484 uint64_t data_size = 0;
/* -1 means "overhead unknown" (printed as "unknown" below). */
1485 float percent = -1.0;
1489 for (i = 0; i < nb_output_streams; i++) {
1490 OutputStream *ost = output_streams[i];
1491 switch (ost->enc_ctx->codec_type) {
1492 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1493 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1494 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1495 default: other_size += ost->data_size; break;
1497 extra_size += ost->enc_ctx->extradata_size;
1498 data_size += ost->data_size;
/* NOTE(review): mixes the deprecated CODEC_FLAG_PASS2 with the new
 * AV_CODEC_FLAG_PASS1 macro — should consistently use AV_CODEC_FLAG_PASS2. */
1499 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1500 != AV_CODEC_FLAG_PASS1)
/* Overhead is container bytes beyond raw stream payload. */
1504 if (data_size && total_size>0 && total_size >= data_size)
1505 percent = 100.0 * (total_size - data_size) / data_size;
1507 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1508 video_size / 1024.0,
1509 audio_size / 1024.0,
1510 subtitle_size / 1024.0,
1511 other_size / 1024.0,
1512 extra_size / 1024.0);
1514 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1516 av_log(NULL, AV_LOG_INFO, "unknown");
1517 av_log(NULL, AV_LOG_INFO, "\n");
1519 /* print verbose per-stream stats */
1520 for (i = 0; i < nb_input_files; i++) {
1521 InputFile *f = input_files[i];
/* Local total_size deliberately shadows the parameter for this file. */
1522 uint64_t total_packets = 0, total_size = 0;
1524 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1525 i, f->ctx->filename);
1527 for (j = 0; j < f->nb_streams; j++) {
1528 InputStream *ist = input_streams[f->ist_index + j];
1529 enum AVMediaType type = ist->dec_ctx->codec_type;
1531 total_size += ist->data_size;
1532 total_packets += ist->nb_packets;
1534 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1535 i, j, media_type_string(type));
1536 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1537 ist->nb_packets, ist->data_size);
1539 if (ist->decoding_needed) {
1540 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1541 ist->frames_decoded);
1542 if (type == AVMEDIA_TYPE_AUDIO)
1543 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1544 av_log(NULL, AV_LOG_VERBOSE, "; ");
1547 av_log(NULL, AV_LOG_VERBOSE, "\n");
1550 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1551 total_packets, total_size);
1554 for (i = 0; i < nb_output_files; i++) {
1555 OutputFile *of = output_files[i];
1556 uint64_t total_packets = 0, total_size = 0;
1558 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1559 i, of->ctx->filename);
1561 for (j = 0; j < of->ctx->nb_streams; j++) {
1562 OutputStream *ost = output_streams[of->ost_index + j];
1563 enum AVMediaType type = ost->enc_ctx->codec_type;
1565 total_size += ost->data_size;
1566 total_packets += ost->packets_written;
1568 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1569 i, j, media_type_string(type));
1570 if (ost->encoding_needed) {
1571 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1572 ost->frames_encoded);
1573 if (type == AVMEDIA_TYPE_AUDIO)
1574 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1575 av_log(NULL, AV_LOG_VERBOSE, "; ");
1578 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1579 ost->packets_written, ost->data_size);
1581 av_log(NULL, AV_LOG_VERBOSE, "\n");
1584 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1585 total_packets, total_size);
1587 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1588 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1590 av_log(NULL, AV_LOG_WARNING, "\n");
1592 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic (and final) status line — frame count, fps, quantizer,
 * optional QP histogram and PSNR, size, time, bitrate, dup/drop counts and
 * speed — to stderr/log, and the machine-readable key=value form to the
 * -progress AVIO if configured. On the last report also prints final stats.
 * Rate-limited to at most one report every 500 ms via static 'last_time'.
 * NOTE(review): many brace/else/declaration lines are missing from this
 * excerpt; pairing of if/else branches below follows the visible text only.
 */
1597 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1600 AVBPrint buf_script;
1602 AVFormatContext *oc;
1604 AVCodecContext *enc;
1605 int frame_number, vid, i;
1608 int64_t pts = INT64_MIN + 1;
/* Static state: persists across calls for the 500 ms rate limit and
 * the accumulated QP histogram. */
1609 static int64_t last_time = -1;
1610 static int qp_histogram[52];
1611 int hours, mins, secs, us;
/* Nothing to do when no reporting destination is active. */
1615 if (!print_stats && !is_last_report && !progress_avio)
1618 if (!is_last_report) {
1619 if (last_time == -1) {
1620 last_time = cur_time;
/* Throttle: skip reports closer than 500 ms apart. */
1623 if ((cur_time - last_time) < 500000)
1625 last_time = cur_time;
/* Wall-clock seconds since transcoding started. */
1628 t = (cur_time-timer_start) / 1000000.0;
1631 oc = output_files[0]->ctx;
1633 total_size = avio_size(oc->pb);
1634 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1635 total_size = avio_tell(oc->pb);
1639 av_bprint_init(&buf_script, 0, 1);
1640 for (i = 0; i < nb_output_streams; i++) {
1642 ost = output_streams[i];
1644 if (!ost->stream_copy)
1645 q = ost->quality / (float) FF_QP2LAMBDA;
/* 'vid' tracks whether a video stream was already reported, so only the
 * first video stream contributes frame/fps fields. */
1647 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1648 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1649 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1650 ost->file_index, ost->index, q);
1652 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1655 frame_number = ost->frame_number;
1656 fps = t > 1 ? frame_number / t : 0;
/* "%3.*f": one decimal place only while fps < 9.95, else none. */
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1658 frame_number, fps < 9.95, fps, q);
1659 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1660 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1661 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1662 ost->file_index, ost->index, q);
1664 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* QP histogram: one hex digit per bucket, log2-compressed counts. */
1668 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1670 for (j = 0; j < 32; j++)
1671 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1674 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1676 double error, error_sum = 0;
1677 double scale, scale_sum = 0;
1679 char type[3] = { 'Y','U','V' };
1680 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1681 for (j = 0; j < 3; j++) {
/* Final report: cumulative encoder error over all frames; periodic
 * report: per-frame error from the stream. */
1682 if (is_last_report) {
1683 error = enc->error[j];
1684 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1686 error = ost->error[j];
1687 scale = enc->width * enc->height * 255.0 * 255.0;
1693 p = psnr(error / scale);
1694 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script key. */
1695 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1696 ost->file_index, ost->index, type[j] | 32, p);
1698 p = psnr(error_sum / scale_sum);
1699 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1700 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1701 ost->file_index, ost->index, p);
1705 /* compute min output value */
1706 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1707 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1708 ost->st->time_base, AV_TIME_BASE_Q));
1710 nb_frames_drop += ost->last_dropped;
1713 secs = FFABS(pts) / AV_TIME_BASE;
1714 us = FFABS(pts) % AV_TIME_BASE;
/* -1 sentinels mean "not computable" and print as N/A below. */
1720 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1721 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1723 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1725 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1726 "size=%8.0fkB time=", total_size / 1024.0);
1728 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1729 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1730 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1731 (100 * us) / AV_TIME_BASE);
1734 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1735 av_bprintf(&buf_script, "bitrate=N/A\n");
1737 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1738 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1741 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1742 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1743 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1744 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1745 hours, mins, secs, us);
1747 if (nb_frames_dup || nb_frames_drop)
1748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1749 nb_frames_dup, nb_frames_drop);
1750 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1751 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1754 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1755 av_bprintf(&buf_script, "speed=N/A\n");
1757 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1758 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1761 if (print_stats || is_last_report) {
/* '\r' keeps periodic reports on one console line; '\n' ends the run. */
1762 const char end = is_last_report ? '\n' : '\r';
1763 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1764 fprintf(stderr, "%s %c", buf, end);
1766 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1771 if (progress_avio) {
1772 av_bprintf(&buf_script, "progress=%s\n",
1773 is_last_report ? "end" : "continue");
1774 avio_write(progress_avio, buf_script.str,
1775 FFMIN(buf_script.len, buf_script.size - 1));
1776 avio_flush(progress_avio);
1777 av_bprint_finalize(&buf_script, NULL);
1778 if (is_last_report) {
1779 if ((ret = avio_closep(&progress_avio)) < 0)
1780 av_log(NULL, AV_LOG_ERROR,
1781 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1786 print_final_stats(total_size);
/*
 * Signal EOF to every active encoder (avcodec_send_frame(enc, NULL)) and
 * drain all remaining packets with avcodec_receive_packet(), muxing them
 * until the encoder returns AVERROR_EOF. Streams that are not encoded,
 * raw-picture passthrough, and sub-frame audio encoders are skipped.
 */
1789 static void flush_encoders(void)
1793 for (i = 0; i < nb_output_streams; i++) {
1794 OutputStream *ost = output_streams[i];
1795 AVCodecContext *enc = ost->enc_ctx;
1796 OutputFile *of = output_files[ost->file_index];
1797 int stop_encoding = 0;
1799 if (!ost->encoding_needed)
/* Audio codecs with frame_size <= 1 (e.g. PCM-like) buffer nothing. */
1802 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1804 #if FF_API_LAVF_FMT_RAWPICTURE
1805 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1809 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame = EOF signal to the encoder.
 * NOTE(review): return value is ignored here — verify this is intended. */
1812 avcodec_send_frame(enc, NULL);
1815 const char *desc = NULL;
1817 switch (enc->codec_type) {
1818 case AVMEDIA_TYPE_AUDIO:
1821 case AVMEDIA_TYPE_VIDEO:
1831 av_init_packet(&pkt);
1835 update_benchmark(NULL);
1836 ret = avcodec_receive_packet(enc, &pkt);
1837 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1838 if (ret < 0 && ret != AVERROR_EOF) {
1839 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append encoder stats produced while draining. */
1844 if (ost->logfile && enc->stats_out) {
1845 fprintf(ost->logfile, "%s", enc->stats_out);
1847 if (ret == AVERROR_EOF) {
/* Muxer already closed: drop the packet instead of writing it. */
1851 if (ost->finished & MUXER_FINISHED) {
1852 av_packet_unref(&pkt);
1855 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1856 pkt_size = pkt.size;
1857 output_packet(of, &pkt, ost);
1858 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1859 do_video_stats(ost, pkt_size);
1870 * Check whether a packet from ist should be written into ost at this time
1872 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1874 OutputFile *of = output_files[ost->file_index];
1875 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* The output stream must be fed from exactly this input stream... */
1877 if (ost->source_index != ist_index)
/* ...and the input must have reached the output file's -ss start time.
 * (Return statements are not visible in this excerpt.) */
1883 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one demuxed packet from 'ist' straight into 'ost' without
 * re-encoding (-c copy): applies -ss/-t window checks, rescales pts/dts
 * and duration into the output stream time base, optionally runs
 * av_parser_change() for legacy global-header handling, and muxes the
 * resulting packet via output_packet().
 */
1889 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1891 OutputFile *of = output_files[ost->file_index];
1892 InputFile *f = input_files [ist->file_index];
1893 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1894 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1898 av_init_packet(&opkt);
/* Before the first keyframe, drop non-key packets unless the user asked
 * to keep them (-copyinkf). */
1900 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1901 !ost->copy_initial_nonkeyframes)
/* Drop packets dated before the output start unless -copypriorss. */
1904 if (!ost->frame_number && !ost->copy_prior_start) {
1905 int64_t comp_start = start_time;
1906 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1907 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1908 if (pkt->pts == AV_NOPTS_VALUE ?
1909 ist->pts < comp_start :
1910 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Output-file recording limit (-t on the output) reached. */
1914 if (of->recording_time != INT64_MAX &&
1915 ist->pts >= of->recording_time + start_time) {
1916 close_output_stream(ost);
/* Input-file recording limit (-t on the input) reached. */
1920 if (f->recording_time != INT64_MAX) {
1921 start_time = f->ctx->start_time;
1922 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1923 start_time += f->start_time;
1924 if (ist->pts >= f->recording_time + start_time) {
1925 close_output_stream(ost);
1930 /* force the input stream PTS */
1931 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1934 if (pkt->pts != AV_NOPTS_VALUE)
1935 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1937 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: synthesize from the input stream clock (AV_TIME_BASE_Q). */
1939 if (pkt->dts == AV_NOPTS_VALUE)
1940 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1942 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1943 opkt.dts -= ost_tb_start_time;
/* Audio: rescale via sample duration to avoid accumulating rounding
 * error in the timestamps (av_rescale_delta keeps a remainder). */
1945 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1946 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1948 duration = ist->dec_ctx->frame_size;
1949 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1950 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1951 ost->st->time_base) - ost_tb_start_time;
1954 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1955 opkt.flags = pkt->flags;
1956 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1957 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1958 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1959 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1960 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1962 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1963 &opkt.data, &opkt.size,
1964 pkt->data, pkt->size,
1965 pkt->flags & AV_PKT_FLAG_KEY);
1967 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser allocated a new buffer: give the packet ownership of it. */
1972 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1977 opkt.data = pkt->data;
1978 opkt.size = pkt->size;
1980 av_copy_packet_side_data(&opkt, pkt);
1982 #if FF_API_LAVF_FMT_RAWPICTURE
1983 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1984 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1985 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1986 /* store AVPicture in AVPacket, as expected by the output format */
1987 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1989 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1993 opkt.data = (uint8_t *)&pict;
1994 opkt.size = sizeof(AVPicture);
1995 opkt.flags |= AV_PKT_FLAG_KEY;
1999 output_packet(of, &opkt, ost);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * Return statements are not visible in this excerpt; by convention the
 * result indicates whether a usable layout is now set — confirm against
 * the full source.
 */
2002 int guess_input_channel_layout(InputStream *ist)
2004 AVCodecContext *dec = ist->dec_ctx;
2006 if (!dec->channel_layout) {
2007 char layout_name[256];
/* Respect the user's cap on how many channels we are willing to guess. */
2009 if (dec->channels > ist->guess_layout_max)
2011 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2012 if (!dec->channel_layout)
2014 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2015 dec->channels, dec->channel_layout);
2016 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2017 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Account a decode attempt in the global success/failure counters and,
 * when -xerror is set, abort on decode errors or corrupt decoded frames.
 * 'ist' may be NULL (e.g. for subtitles) — the corrupt-frame check is
 * skipped in that case.
 */
2022 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2024 if (*got_output || ret<0)
2025 decode_error_stat[ret<0] ++;
2027 if (ret < 0 && exit_on_error)
2030 if (exit_on_error && *got_output && ist) {
2031 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2032 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2038 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2039 // There is the following difference: if you got a frame, you must call
2040 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2041 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/*
 * Thin wrapper mapping the send/receive decode API onto the old
 * one-call-per-frame pattern used by the callers in this file.
 * Sets *got_frame when a frame was produced (assignment not visible in
 * this excerpt).
 */
2042 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2049 ret = avcodec_send_packet(avctx, pkt);
2050 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2051 // decoded frames with avcodec_receive_frame() until done.
/* EOF from send_packet is benign: the decoder was already drained. */
2052 if (ret < 0 && ret != AVERROR_EOF)
2056 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN just means "feed more input" — not an error for the caller. */
2057 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Decode one audio packet from 'ist', update timing/accounting, detect
 * sample format/rate/channel-layout changes (reconfiguring filtergraphs
 * when needed), derive a pts for the decoded frame, and push it into all
 * of the stream's filter inputs.
 * Returns a negative AVERROR on failure.
 */
2065 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2067 AVFrame *decoded_frame, *f;
2068 AVCodecContext *avctx = ist->dec_ctx;
2069 int i, ret, err = 0, resample_changed;
2070 AVRational decoded_frame_tb;
/* Lazily allocate the two reusable frames owned by the input stream. */
2072 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2073 return AVERROR(ENOMEM);
2074 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2075 return AVERROR(ENOMEM);
2076 decoded_frame = ist->decoded_frame;
2078 update_benchmark(NULL);
2079 ret = decode(avctx, decoded_frame, got_output, pkt);
2080 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder claiming success with an invalid sample rate is rejected. */
2082 if (ret >= 0 && avctx->sample_rate <= 0) {
2083 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2084 ret = AVERROR_INVALIDDATA;
2087 if (ret != AVERROR_EOF)
2088 check_decode_result(ist, got_output, ret);
2090 if (!*got_output || ret < 0)
2093 ist->samples_decoded += decoded_frame->nb_samples;
2094 ist->frames_decoded++;
2097 /* increment next_dts to use for the case where the input stream does not
2098 have timestamps or there are multiple frames in the packet */
2099 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2101 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in the decoded audio parameters since last frame. */
2105 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2106 ist->resample_channels != avctx->channels ||
2107 ist->resample_channel_layout != decoded_frame->channel_layout ||
2108 ist->resample_sample_rate != decoded_frame->sample_rate;
2109 if (resample_changed) {
2110 char layout1[64], layout2[64];
2112 if (!guess_input_channel_layout(ist)) {
2113 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2114 "layout for Input Stream #%d.%d\n", ist->file_index,
2118 decoded_frame->channel_layout = avctx->channel_layout;
2120 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2121 ist->resample_channel_layout);
2122 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2123 decoded_frame->channel_layout);
2125 av_log(NULL, AV_LOG_INFO,
2126 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2127 ist->file_index, ist->st->index,
2128 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2129 ist->resample_channels, layout1,
2130 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2131 avctx->channels, layout2);
/* Remember the new parameters as the baseline for future comparisons. */
2133 ist->resample_sample_fmt = decoded_frame->format;
2134 ist->resample_sample_rate = decoded_frame->sample_rate;
2135 ist->resample_channel_layout = decoded_frame->channel_layout;
2136 ist->resample_channels = avctx->channels;
2138 for (i = 0; i < ist->nb_filters; i++) {
2139 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2141 av_log(NULL, AV_LOG_ERROR,
2142 "Error reconfiguring input stream %d:%d filter %d\n",
2143 ist->file_index, ist->st->index, i);
/* Rebuild every filtergraph that consumes this input stream. */
2148 for (i = 0; i < nb_filtergraphs; i++)
2149 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2150 FilterGraph *fg = filtergraphs[i];
2151 if (configure_filtergraph(fg) < 0) {
2152 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* Pick a pts source by preference: decoded pts > packet pts > stream dts. */
2158 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2159 decoded_frame_tb = ist->st->time_base;
2160 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2161 decoded_frame->pts = pkt->pts;
2162 decoded_frame_tb = ist->st->time_base;
2164 decoded_frame->pts = ist->dts;
2165 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale to a 1/sample_rate time base, keeping a sample-accurate
 * remainder across frames via filter_in_rescale_delta_last. */
2167 if (decoded_frame->pts != AV_NOPTS_VALUE)
2168 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2169 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2170 (AVRational){1, avctx->sample_rate});
2171 ist->nb_samples = decoded_frame->nb_samples;
2172 for (i = 0; i < ist->nb_filters; i++) {
/* All but the last filter input get a ref; the last consumes the frame. */
2173 if (i < ist->nb_filters - 1) {
2174 f = ist->filter_frame;
2175 err = av_frame_ref(f, decoded_frame);
2180 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2181 AV_BUFFERSRC_FLAG_PUSH);
2182 if (err == AVERROR_EOF)
2183 err = 0; /* ignore */
2187 decoded_frame->pts = AV_NOPTS_VALUE;
2190 av_frame_unref(ist->filter_frame);
2191 av_frame_unref(decoded_frame);
2192 return err < 0 ? err : ret;
/*
 * Decode one video packet (or drain on EOF) from 'ist': track dts values
 * for best-effort timestamping at EOF, detect size/pixel-format changes
 * (reconfiguring filtergraphs), retrieve hwaccel frames to system memory,
 * and push the decoded frame into all of the stream's filter inputs.
 * Returns a negative AVERROR on failure.
 */
2195 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2197 AVFrame *decoded_frame, *f;
2198 int i, ret = 0, err = 0, resample_changed;
2199 int64_t best_effort_timestamp;
2200 int64_t dts = AV_NOPTS_VALUE;
2201 AVRational *frame_sample_aspect;
2204 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2205 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2207 if (!eof && pkt && pkt->size == 0)
2210 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2211 return AVERROR(ENOMEM);
2212 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2213 return AVERROR(ENOMEM);
2214 decoded_frame = ist->decoded_frame;
2215 if (ist->dts != AV_NOPTS_VALUE)
2216 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2219 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2222 // The old code used to set dts on the drain packet, which does not work
2223 // with the new API anymore.
/* Queue this packet's dts so draining at EOF can still assign timestamps. */
2225 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2227 return AVERROR(ENOMEM);
2228 ist->dts_buffer = new;
2229 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2232 update_benchmark(NULL);
2233 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2234 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2236 // The following line may be required in some cases where there is no parser
2237 // or the parser does not has_b_frames correctly
2238 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2239 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2240 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2242 av_log(ist->dec_ctx, AV_LOG_WARNING,
2243 "video_delay is larger in decoder than demuxer %d > %d.\n"
2244 "If you want to help, upload a sample "
2245 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2246 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2247 ist->dec_ctx->has_b_frames,
2248 ist->st->codecpar->video_delay);
2251 if (ret != AVERROR_EOF)
2252 check_decode_result(ist, got_output, ret);
/* Sanity check: decoded frame geometry must match the decoder context. */
2254 if (*got_output && ret >= 0) {
2255 if (ist->dec_ctx->width != decoded_frame->width ||
2256 ist->dec_ctx->height != decoded_frame->height ||
2257 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2258 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2259 decoded_frame->width,
2260 decoded_frame->height,
2261 decoded_frame->format,
2262 ist->dec_ctx->width,
2263 ist->dec_ctx->height,
2264 ist->dec_ctx->pix_fmt);
2268 if (!*got_output || ret < 0)
/* -top user override for interlaced field order. */
2271 if(ist->top_field_first>=0)
2272 decoded_frame->top_field_first = ist->top_field_first;
2274 ist->frames_decoded++;
/* Copy hwaccel surface data back to a software frame when required. */
2276 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2277 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2281 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2283 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* During EOF drain, fall back to the oldest buffered packet dts. */
2285 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2286 best_effort_timestamp = ist->dts_buffer[0];
2288 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2289 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2290 ist->nb_dts_buffer--;
2293 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2294 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2296 if (ts != AV_NOPTS_VALUE)
2297 ist->next_pts = ist->pts = ts;
2301 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2302 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2303 ist->st->index, av_ts2str(decoded_frame->pts),
2304 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2305 best_effort_timestamp,
2306 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2307 decoded_frame->key_frame, decoded_frame->pict_type,
2308 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level aspect ratio overrides the codec-level one. */
2311 if (ist->st->sample_aspect_ratio.num)
2312 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2314 resample_changed = ist->resample_width != decoded_frame->width ||
2315 ist->resample_height != decoded_frame->height ||
2316 ist->resample_pix_fmt != decoded_frame->format;
2317 if (resample_changed) {
2318 av_log(NULL, AV_LOG_INFO,
2319 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2320 ist->file_index, ist->st->index,
2321 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2322 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2324 ist->resample_width = decoded_frame->width;
2325 ist->resample_height = decoded_frame->height;
2326 ist->resample_pix_fmt = decoded_frame->format;
2328 for (i = 0; i < ist->nb_filters; i++) {
2329 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2331 av_log(NULL, AV_LOG_ERROR,
2332 "Error reconfiguring input stream %d:%d filter %d\n",
2333 ist->file_index, ist->st->index, i);
/* Rebuild graphs consuming this stream if reinit is allowed. */
2338 for (i = 0; i < nb_filtergraphs; i++) {
2339 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2340 configure_filtergraph(filtergraphs[i]) < 0) {
2341 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2347 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2348 for (i = 0; i < ist->nb_filters; i++) {
2349 if (!frame_sample_aspect->num)
2350 *frame_sample_aspect = ist->st->sample_aspect_ratio;
/* All but the last filter input get a ref; the last consumes the frame. */
2352 if (i < ist->nb_filters - 1) {
2353 f = ist->filter_frame;
2354 err = av_frame_ref(f, decoded_frame);
2359 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2360 if (err == AVERROR_EOF) {
2361 err = 0; /* ignore */
2362 } else if (err < 0) {
2363 av_log(NULL, AV_LOG_FATAL,
2364 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2370 av_frame_unref(ist->filter_frame);
2371 av_frame_unref(decoded_frame);
2372 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet from input stream 'ist' and dispatch the result:
 * optionally retime it (-fix_sub_duration), render it into any sub2video
 * filter inputs, and encode it into every subtitle output fed by this input.
 * Sets *got_output when a complete AVSubtitle was produced.
 */
2375 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2377 AVSubtitle subtitle;
2378 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2379 &subtitle, got_output, pkt);
/* Account for the decode result (error bookkeeping / -xerror policy). */
2381 check_decode_result(NULL, got_output, ret);
2383 if (ret < 0 || !*got_output) {
/* Nothing decoded: flush pending sub2video state before bailing out. */
2385 sub2video_flush(ist);
2389 if (ist->fix_sub_duration) {
/* Cap the previous subtitle's display time so it ends when the current
 * one starts; 'end' is in milliseconds relative to the previous pts. */
2391 if (ist->prev_sub.got_output) {
2392 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2393 1000, AV_TIME_BASE);
2394 if (end < ist->prev_sub.subtitle.end_display_time) {
2395 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2396 "Subtitle duration reduced from %d to %d%s\n",
2397 ist->prev_sub.subtitle.end_display_time, end,
2398 end <= 0 ? ", dropping it" : "");
2399 ist->prev_sub.subtitle.end_display_time = end;
/* Delay output by one subtitle: stash the current result and emit the
 * previously buffered one, whose duration we can now fix. */
2402 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2403 FFSWAP(int, ret, ist->prev_sub.ret);
2404 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
/* Push the decoded subtitle into the sub2video filter chain, if any. */
2412 sub2video_update(ist, &subtitle);
2414 if (!subtitle.num_rects)
2417 ist->frames_decoded++;
/* Hand the subtitle to every output stream that encodes from this input. */
2419 for (i = 0; i < nb_output_streams; i++) {
2420 OutputStream *ost = output_streams[i];
2422 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2423 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2426 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2430 avsubtitle_free(&subtitle);
/*
 * Signal end-of-stream to every buffersrc filter fed by 'ist' by pushing a
 * NULL frame into each of them.
 */
2434 static int send_filter_eof(InputStream *ist)
2437 for (i = 0; i < ist->nb_filters; i++) {
2438 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2445 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one demuxed packet for input stream 'ist' (or flush the decoders
 * when pkt is NULL). Maintains the stream's pts/dts/next_pts/next_dts
 * bookkeeping in AV_TIME_BASE units, runs the per-media-type decoder when
 * decoding is needed, sends filter EOF after a full flush (unless 'no_eof'
 * is set, e.g. when looping the input), and performs direct streamcopy of
 * the packet to matching outputs otherwise.
 * Returns nonzero while the stream has more to process, 0 once EOF was
 * fully reached.
 */
2446 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2450 int eof_reached = 0;
/* On the very first packet, seed ist->dts; B-frame reordering delay is
 * subtracted when an average frame rate is known. */
2453 if (!ist->saw_first_ts) {
2454 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2456 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2457 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2458 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2460 ist->saw_first_ts = 1;
2463 if (ist->next_dts == AV_NOPTS_VALUE)
2464 ist->next_dts = ist->dts;
2465 if (ist->next_pts == AV_NOPTS_VALUE)
2466 ist->next_pts = ist->pts;
2470 av_init_packet(&avpkt);
/* Resynchronize our timeline from the packet's dts when available. */
2477 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2478 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2479 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2480 ist->next_pts = ist->pts = ist->dts;
2483 // while we have more to decode or while the decoder did output something on EOF
2484 while (ist->decoding_needed) {
2488 ist->pts = ist->next_pts;
2489 ist->dts = ist->next_dts;
/* Dispatch to the per-media-type decode helper; on repeat iterations the
 * packet is not resent (NULL) so the decoder just drains. */
2491 switch (ist->dec_ctx->codec_type) {
2492 case AVMEDIA_TYPE_AUDIO:
2493 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2495 case AVMEDIA_TYPE_VIDEO:
2496 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2497 if (!repeating || !pkt || got_output) {
/* Derive the frame duration: prefer the packet duration, else the
 * decoder's framerate adjusted by repeat_pict from the parser. */
2498 if (pkt && pkt->duration) {
2499 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2500 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2501 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2502 duration = ((int64_t)AV_TIME_BASE *
2503 ist->dec_ctx->framerate.den * ticks) /
2504 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2507 if(ist->dts != AV_NOPTS_VALUE && duration) {
2508 ist->next_dts += duration;
2510 ist->next_dts = AV_NOPTS_VALUE;
2514 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2516 case AVMEDIA_TYPE_SUBTITLE:
2519 ret = transcode_subtitles(ist, &avpkt, &got_output);
2520 if (!pkt && ret >= 0)
2527 if (ret == AVERROR_EOF) {
2533 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2534 ist->file_index, ist->st->index, av_err2str(ret));
2537 // Decoding might not terminate if we're draining the decoder, and
2538 // the decoder keeps returning an error.
2539 // This should probably be considered a libavcodec issue.
2540 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2549 // During draining, we might get multiple output frames in this loop.
2550 // ffmpeg.c does not drain the filter chain on configuration changes,
2551 // which means if we send multiple frames at once to the filters, and
2552 // one of those frames changes configuration, the buffered frames will
2553 // be lost. This can upset certain FATE tests.
2554 // Decode only 1 frame per call on EOF to appease these FATE tests.
2555 // The ideal solution would be to rewrite decoding to use the new
2556 // decoding API in a better way.
2563 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2564 /* except when looping we need to flush but not to send an EOF */
2565 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2566 int ret = send_filter_eof(ist);
2568 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2573 /* handle stream copy */
2574 if (!ist->decoding_needed) {
/* Advance next_dts by one frame/packet worth of time so copied packets
 * keep a consistent timeline even without decoding. */
2575 ist->dts = ist->next_dts;
2576 switch (ist->dec_ctx->codec_type) {
2577 case AVMEDIA_TYPE_AUDIO:
2578 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2579 ist->dec_ctx->sample_rate;
2581 case AVMEDIA_TYPE_VIDEO:
2582 if (ist->framerate.num) {
2583 // TODO: Remove work-around for c99-to-c89 issue 7
2584 AVRational time_base_q = AV_TIME_BASE_Q;
2585 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2586 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2587 } else if (pkt->duration) {
2588 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2589 } else if(ist->dec_ctx->framerate.num != 0) {
2590 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2591 ist->next_dts += ((int64_t)AV_TIME_BASE *
2592 ist->dec_ctx->framerate.den * ticks) /
2593 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2597 ist->pts = ist->dts;
2598 ist->next_pts = ist->next_dts;
/* Copy the packet to every streamcopy output attached to this input. */
2600 for (i = 0; pkt && i < nb_output_streams; i++) {
2601 OutputStream *ost = output_streams[i];
2603 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2606 do_streamcopy(ist, ost, pkt);
2609 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output files and emit it,
 * either to stdout or to the file given by -sdp_file. Waits until every
 * output file has written its header before generating.
 */
2612 static void print_sdp(void)
2617 AVIOContext *sdp_pb;
2618 AVFormatContext **avc;
/* Bail out until all muxers have been initialized. */
2620 for (i = 0; i < nb_output_files; i++) {
2621 if (!output_files[i]->header_written)
2625 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP outputs; 'j' counts how many were found. */
2628 for (i = 0, j = 0; i < nb_output_files; i++) {
2629 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2630 avc[j] = output_files[i]->ctx;
2638 av_sdp_create(avc, j, sdp, sizeof(sdp));
2640 if (!sdp_filename) {
2641 printf("SDP:\n%s\n", sdp);
2644 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2645 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2647 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2648 avio_closep(&sdp_pb);
/* Freed so the SDP is only written once. */
2649 av_freep(&sdp_filename);
/*
 * Look up the HWAccel table entry matching a hardware pixel format.
 * The hwaccels[] table is terminated by an entry with a NULL name.
 */
2657 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2660 for (i = 0; hwaccels[i].name; i++)
2661 if (hwaccels[i].pix_fmt == pix_fmt)
2662 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * format list and pick the first hardware format whose hwaccel matches the
 * user's -hwaccel selection and can be initialized; records the active
 * hwaccel id and pixel format on the InputStream.
 */
2666 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2668 InputStream *ist = s->opaque;
2669 const enum AVPixelFormat *p;
/* The list is terminated by -1 (AV_PIX_FMT_NONE). */
2672 for (p = pix_fmts; *p != -1; p++) {
2673 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2674 const HWAccel *hwaccel;
/* Skip software formats — only hwaccel formats are interesting here. */
2676 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2679 hwaccel = get_hwaccel(*p);
/* Reject hwaccels that conflict with an already-active one or with the
 * one the user explicitly requested. */
2681 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2682 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2685 ret = hwaccel->init(s);
/* An explicitly requested hwaccel that fails to init is fatal;
 * with -hwaccel auto we would fall through to the next candidate. */
2687 if (ist->hwaccel_id == hwaccel->id) {
2688 av_log(NULL, AV_LOG_FATAL,
2689 "%s hwaccel requested for input stream #%d:%d, "
2690 "but cannot be initialized.\n", hwaccel->name,
2691 ist->file_index, ist->st->index);
2692 return AV_PIX_FMT_NONE;
2697 if (ist->hw_frames_ctx) {
2698 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2699 if (!s->hw_frames_ctx)
2700 return AV_PIX_FMT_NONE;
2703 ist->active_hwaccel_id = hwaccel->id;
2704 ist->hwaccel_pix_fmt = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate frame allocation to the
 * active hwaccel when the frame uses its pixel format, otherwise fall back
 * to libavcodec's default allocator.
 */
2711 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2713 InputStream *ist = s->opaque;
2715 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2716 return ist->hwaccel_get_buffer(s, frame, flags);
2718 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for input stream 'ist_index' if decoding is needed:
 * installs the hwaccel-aware get_format/get_buffer2 callbacks, applies
 * stream-specific decoder options, and opens the codec. On failure a
 * human-readable message is written into 'error' (of size 'error_len')
 * and a negative error code is returned.
 */
2721 static int init_input_stream(int ist_index, char *error, int error_len)
2724 InputStream *ist = input_streams[ist_index];
2726 if (ist->decoding_needed) {
2727 AVCodec *codec = ist->dec;
2729 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2730 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2731 return AVERROR(EINVAL);
/* Route decoder callbacks through this InputStream (hwaccel support). */
2734 ist->dec_ctx->opaque = ist;
2735 ist->dec_ctx->get_format = get_format;
2736 ist->dec_ctx->get_buffer2 = get_buffer;
2737 ist->dec_ctx->thread_safe_callbacks = 1;
2739 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need compute_edt when they feed an output directly;
 * mixing that with filtering is only partially supported. */
2740 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2741 (ist->decoding_needed & DECODING_FOR_OST)) {
2742 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2743 if (ist->decoding_needed & DECODING_FOR_FILTER)
2744 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2747 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2749 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2750 * audio, and video decoders such as cuvid or mediacodec */
2751 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread-count selection unless the user chose one. */
2753 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2754 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2755 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2756 if (ret == AVERROR_EXPERIMENTAL)
2757 abort_codec_experimental(codec, 0);
2759 snprintf(error, error_len,
2760 "Error while opening decoder for input stream "
2762 ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder options left unconsumed are reported as errors. */
2765 assert_avoptions(ist->decoder_opts);
2768 ist->next_pts = AV_NOPTS_VALUE;
2769 ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream an output stream is mapped from, when it has a
 * direct source (source_index >= 0).
 */
2774 static InputStream *get_input_stream(OutputStream *ost)
2776 if (ost->source_index >= 0)
2777 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values (ascending order). */
2781 static int compare_int64(const void *a, const void *b)
2783 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2786 /* open the muxer when all the streams are initialized */
/*
 * Once every stream of output file 'of' is initialized, write the muxer
 * header, print the SDP if requested, and flush the packets that were
 * queued per-stream while the muxer was not yet open.
 */
2787 static int check_init_output_file(OutputFile *of, int file_index)
/* Not ready yet if any stream is still uninitialized. */
2791 for (i = 0; i < of->ctx->nb_streams; i++) {
2792 OutputStream *ost = output_streams[of->ost_index + i];
2793 if (!ost->initialized)
2797 of->ctx->interrupt_callback = int_cb;
2799 ret = avformat_write_header(of->ctx, &of->opts);
2801 av_log(NULL, AV_LOG_ERROR,
2802 "Could not write header for output file #%d "
2803 "(incorrect codec parameters ?): %s",
2804 file_index, av_err2str(ret));
2807 //assert_avoptions(of->opts);
2808 of->header_written = 1;
2810 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2812 if (sdp_filename || want_sdp)
2815 /* flush the muxing queues */
2816 for (i = 0; i < of->ctx->nb_streams; i++) {
2817 OutputStream *ost = output_streams[of->ost_index + i];
2819 while (av_fifo_size(ost->muxing_queue)) {
2821 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2822 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and timebase through the chain (each filter's
 * input comes from the previous filter's output, the first from the
 * stream itself), then copy the final output parameters/timebase back onto
 * the output stream. No-op when the stream has no bitstream filters.
 */
2829 static int init_output_bsfs(OutputStream *ost)
2834 if (!ost->nb_bitstream_filters)
2837 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2838 ctx = ost->bsf_ctx[i];
2840 ret = avcodec_parameters_copy(ctx->par_in,
2841 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2845 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2847 ret = av_bsf_init(ctx);
2849 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2850 ost->bsf_ctx[i]->filter->name);
/* The last filter in the chain defines what the muxer will see. */
2855 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2856 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2860 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in streamcopy (-c copy) mode: copy the input
 * stream's codec parameters, timing information and side data onto the
 * output stream without opening an encoder, applying per-media-type fixups
 * (codec tag, aspect ratio, block_align quirks, etc.).
 */
2865 static int init_output_stream_streamcopy(OutputStream *ost)
2867 OutputFile *of = output_files[ost->file_index];
2868 InputStream *ist = get_input_stream(ost);
2869 AVCodecParameters *par_dst = ost->st->codecpar;
2870 AVCodecParameters *par_src = ost->ref_par;
2873 uint64_t extra_size;
/* Streamcopy requires a direct input source and no filtergraph. */
2875 av_assert0(ist && !ost->filter);
/* Route the input parameters through enc_ctx so user -c:x copy options
 * (e.g. tag overrides) can still be applied, then snapshot into par_src. */
2877 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2878 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2880 av_log(NULL, AV_LOG_FATAL,
2881 "Error setting up codec context options.\n");
2884 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* Guard the later extradata allocation against overflow. */
2886 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2888 if (extra_size > INT_MAX) {
2889 return AVERROR(EINVAL);
2892 /* if stream_copy is selected, no need to decode or encode */
2893 par_dst->codec_id = par_src->codec_id;
2894 par_dst->codec_type = par_src->codec_type;
/* Keep the source codec tag only when the output container can represent
 * this codec with it (or has no tag table of its own). */
2896 if (!par_dst->codec_tag) {
2897 unsigned int codec_tag;
2898 if (!of->ctx->oformat->codec_tag ||
2899 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2900 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2901 par_dst->codec_tag = par_src->codec_tag;
2904 par_dst->bit_rate = par_src->bit_rate;
2905 par_dst->field_order = par_src->field_order;
2906 par_dst->chroma_location = par_src->chroma_location;
2908 if (par_src->extradata_size) {
2909 par_dst->extradata = av_mallocz(extra_size);
2910 if (!par_dst->extradata) {
2911 return AVERROR(ENOMEM);
2913 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2914 par_dst->extradata_size = par_src->extradata_size;
2916 par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2917 par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2919 if (!ost->frame_rate.num)
2920 ost->frame_rate = ist->framerate;
2921 ost->st->avg_frame_rate = ost->frame_rate;
/* Let lavf transfer container-level timing (timebase, etc.), honoring
 * the -copytb mode. */
2923 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2927 // copy timebase while removing common factors
2928 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
/* Duplicate stream-level side data, dropping the display matrix when the
 * user overrode rotation. */
2930 if (ist->st->nb_side_data) {
2931 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2932 sizeof(*ist->st->side_data));
2933 if (!ost->st->side_data)
2934 return AVERROR(ENOMEM);
2936 ost->st->nb_side_data = 0;
2937 for (i = 0; i < ist->st->nb_side_data; i++) {
2938 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2939 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2941 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2944 sd_dst->data = av_malloc(sd_src->size);
2946 return AVERROR(ENOMEM);
2947 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2948 sd_dst->size = sd_src->size;
2949 sd_dst->type = sd_src->type;
2950 ost->st->nb_side_data++;
/* A parser is still needed on the copy path (see FIXME in the caller). */
2954 ost->parser = av_parser_init(par_dst->codec_id);
2955 ost->parser_avctx = avcodec_alloc_context3(NULL);
2956 if (!ost->parser_avctx)
2957 return AVERROR(ENOMEM);
2959 switch (par_dst->codec_type) {
2960 case AVMEDIA_TYPE_AUDIO:
/* -vol requires decoding, which streamcopy skips. */
2961 if (audio_volume != 256) {
2962 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2965 par_dst->channel_layout = par_src->channel_layout;
2966 par_dst->sample_rate = par_src->sample_rate;
2967 par_dst->channels = par_src->channels;
2968 par_dst->frame_size = par_src->frame_size;
2969 par_dst->block_align = par_src->block_align;
2970 par_dst->initial_padding = par_src->initial_padding;
2971 par_dst->trailing_padding = par_src->trailing_padding;
2972 par_dst->profile = par_src->profile;
/* MP3/AC-3 block_align quirks: these values are bogus for muxing. */
2973 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2974 par_dst->block_align= 0;
2975 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2976 par_dst->block_align= 0;
2978 case AVMEDIA_TYPE_VIDEO:
2979 par_dst->format = par_src->format;
2980 par_dst->color_space = par_src->color_space;
2981 par_dst->color_range = par_src->color_range;
2982 par_dst->color_primaries = par_src->color_primaries;
2983 par_dst->color_trc = par_src->color_trc;
2984 par_dst->width = par_src->width;
2985 par_dst->height = par_src->height;
2986 par_dst->video_delay = par_src->video_delay;
2987 par_dst->profile = par_src->profile;
/* Sample aspect ratio: -aspect override wins, then container-level SAR,
 * then the codec-level SAR from the source. */
2988 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2990 av_mul_q(ost->frame_aspect_ratio,
2991 (AVRational){ par_dst->height, par_dst->width });
2992 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2993 "with stream copy may produce invalid files\n");
2995 else if (ist->st->sample_aspect_ratio.num)
2996 sar = ist->st->sample_aspect_ratio;
2998 sar = par_src->sample_aspect_ratio;
2999 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3000 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3001 ost->st->r_frame_rate = ist->st->r_frame_rate;
3003 case AVMEDIA_TYPE_SUBTITLE:
3004 par_dst->width = par_src->width;
3005 par_dst->height = par_src->height;
3007 case AVMEDIA_TYPE_UNKNOWN:
3008 case AVMEDIA_TYPE_DATA:
3009 case AVMEDIA_TYPE_ATTACHMENT:
/*
 * Finish initializing one output stream: open its encoder (or run the
 * streamcopy setup), propagate encoder parameters/side data to the muxer
 * stream, initialize the stream's bitstream filters, and — once this
 * stream is ready — give its output file a chance to write the header.
 * On failure a message is written into 'error' (size 'error_len').
 */
3018 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3022 if (ost->encoding_needed) {
3023 AVCodec *codec = ost->enc;
3024 AVCodecContext *dec = NULL;
3027 if ((ist = get_input_stream(ost)))
/* Forward the decoder's ASS subtitle header to the encoder;
 * the ASS code assumes NUL termination, hence the extra byte. */
3029 if (dec && dec->subtitle_header) {
3030 /* ASS code assumes this buffer is null terminated so add extra byte. */
3031 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3032 if (!ost->enc_ctx->subtitle_header)
3033 return AVERROR(ENOMEM);
3034 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3035 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3037 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3038 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128k when the user specified none. */
3039 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3041 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3042 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3043 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Pass hardware frame context from the filtergraph to the encoder. */
3045 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3046 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3047 if (!ost->enc_ctx->hw_frames_ctx)
3048 return AVERROR(ENOMEM);
3051 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3052 if (ret == AVERROR_EXPERIMENTAL)
3053 abort_codec_experimental(codec, 1);
3054 snprintf(error, error_len,
3055 "Error while opening encoder for output stream #%d:%d - "
3056 "maybe incorrect parameters such as bit_rate, rate, width or height",
3057 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit frames
 * of exactly enc_ctx->frame_size samples. */
3060 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3061 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3062 av_buffersink_set_frame_size(ost->filter->filter,
3063 ost->enc_ctx->frame_size);
3064 assert_avoptions(ost->encoder_opts);
3065 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3066 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3067 " It takes bits/s as argument, not kbits/s\n");
3069 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3071 av_log(NULL, AV_LOG_FATAL,
3072 "Error initializing the output stream codec context.\n");
3076 * FIXME: ost->st->codec should't be needed here anymore.
3078 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* Mirror any side data the encoder produced onto the muxer stream. */
3082 if (ost->enc_ctx->nb_coded_side_data) {
3085 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3086 sizeof(*ost->st->side_data));
3087 if (!ost->st->side_data)
3088 return AVERROR(ENOMEM);
3090 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3091 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3092 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3094 sd_dst->data = av_malloc(sd_src->size);
3096 return AVERROR(ENOMEM);
3097 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3098 sd_dst->size = sd_src->size;
3099 sd_dst->type = sd_src->type;
3100 ost->st->nb_side_data++;
3104 // copy timebase while removing common factors
3105 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3106 ost->st->codec->codec= ost->enc_ctx->codec;
3107 } else if (ost->stream_copy) {
3108 ret = init_output_stream_streamcopy(ost);
3113 * FIXME: will the codec context used by the parser during streamcopy
3114 * This should go away with the new parser API.
3116 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3121 /* initialize bitstream filters for the output stream
3122 * needs to be done here, because the codec id for streamcopy is not
3123 * known until now */
3124 ret = init_output_bsfs(ost);
3128 ost->initialized = 1;
/* May write the muxer header if this was the last pending stream. */
3130 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Parse a -force_key_frames specification 'kf' (a comma-separated list of
 * timestamps, where an entry may also be "chapters[+offset]" to key-frame
 * at every chapter start) into a sorted array of pts values expressed in
 * the encoder's timebase, stored on ost->forced_kf_pts/forced_kf_count.
 * Exits the program on allocation failure.
 */
3137 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3138 AVCodecContext *avctx)
3141 int n = 1, i, size, index = 0;
/* First pass: count entries (commas + 1) to size the array. */
3144 for (p = kf; *p; p++)
3148 pts = av_malloc_array(size, sizeof(*pts));
3150 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3155 for (i = 0; i < n; i++) {
3156 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one entry per chapter, grown in place. */
3161 if (!memcmp(p, "chapters", 8)) {
3163 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3166 if (avf->nb_chapters > INT_MAX - size ||
3167 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3169 av_log(NULL, AV_LOG_FATAL,
3170 "Could not allocate forced key frames array.\n");
3173 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3174 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3176 for (j = 0; j < avf->nb_chapters; j++) {
3177 AVChapter *c = avf->chapters[j];
3178 av_assert1(index < size);
3179 pts[index++] = av_rescale_q(c->start, c->time_base,
3180 avctx->time_base) + t;
/* Plain timestamp entry. */
3185 t = parse_time_or_die("force_key_frames", p, 1);
3186 av_assert1(index < size);
3187 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder loop can scan it linearly. */
3194 av_assert0(index == size);
3195 qsort(pts, size, sizeof(*pts), compare_int64);
3196 ost->forced_kf_count = size;
3197 ost->forced_kf_pts = pts;
/*
 * Warn once per stream when a packet arrives for a stream that appeared
 * after the input was opened; nb_streams_warn tracks the highest stream
 * index already reported.
 */
3200 static void report_new_stream(int input_index, AVPacket *pkt)
3202 InputFile *file = input_files[input_index];
3203 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a later) stream index. */
3205 if (pkt->stream_index < file->nb_streams_warn)
3207 av_log(file->ctx, AV_LOG_WARNING,
3208 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3209 av_get_media_type_string(st->codecpar->codec_type),
3210 input_index, pkt->stream_index,
3211 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3212 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the "encoder" metadata tag on an output stream to identify libavcodec
 * and the encoder used (e.g. "Lavc... libx264"), unless the user already
 * provided one. In bitexact mode ("-fflags bitexact" or the codec's
 * AV_CODEC_FLAG_BITEXACT) the version string is omitted so output is
 * reproducible.
 */
3215 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3217 AVDictionaryEntry *e;
3219 uint8_t *encoder_string;
3220 int encoder_string_len;
3221 int format_flags = 0;
3222 int codec_flags = 0;
/* User-supplied tag wins; do not overwrite. */
3224 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags / -flags option strings to detect bitexact mode. */
3227 e = av_dict_get(of->opts, "fflags", NULL, 0);
3229 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3232 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3234 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3236 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3239 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3242 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3243 encoder_string = av_mallocz(encoder_string_len);
3244 if (!encoder_string)
3247 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3248 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3250 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3251 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* Ownership of encoder_string passes to the dictionary (DONT_STRDUP_VAL). */
3252 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3253 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3256 static int transcode_init(void)
3258 int ret = 0, i, j, k;
3259 AVFormatContext *oc;
3262 char error[1024] = {0};
3264 for (i = 0; i < nb_filtergraphs; i++) {
3265 FilterGraph *fg = filtergraphs[i];
3266 for (j = 0; j < fg->nb_outputs; j++) {
3267 OutputFilter *ofilter = fg->outputs[j];
3268 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3270 if (fg->nb_inputs != 1)
3272 for (k = nb_input_streams-1; k >= 0 ; k--)
3273 if (fg->inputs[0]->ist == input_streams[k])
3275 ofilter->ost->source_index = k;
3279 /* init framerate emulation */
3280 for (i = 0; i < nb_input_files; i++) {
3281 InputFile *ifile = input_files[i];
3282 if (ifile->rate_emu)
3283 for (j = 0; j < ifile->nb_streams; j++)
3284 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3287 /* for each output stream, we compute the right encoding parameters */
3288 for (i = 0; i < nb_output_streams; i++) {
3289 ost = output_streams[i];
3290 oc = output_files[ost->file_index]->ctx;
3291 ist = get_input_stream(ost);
3293 if (ost->attachment_filename)
3297 ost->st->disposition = ist->st->disposition;
3299 for (j=0; j<oc->nb_streams; j++) {
3300 AVStream *st = oc->streams[j];
3301 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3304 if (j == oc->nb_streams)
3305 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3306 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3307 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3310 if (!ost->stream_copy) {
3311 AVCodecContext *enc_ctx = ost->enc_ctx;
3312 AVCodecContext *dec_ctx = NULL;
3314 set_encoder_id(output_files[ost->file_index], ost);
3317 dec_ctx = ist->dec_ctx;
3319 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3323 if (qsv_transcode_init(ost))
3328 if (cuvid_transcode_init(ost))
3332 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3333 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3334 filtergraph_is_simple(ost->filter->graph)) {
3335 FilterGraph *fg = ost->filter->graph;
3338 ret = ifilter_parameters_from_decoder(fg->inputs[0],
3341 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
3346 if (configure_filtergraph(fg)) {
3347 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3352 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3353 if (!ost->frame_rate.num)
3354 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3355 if (ist && !ost->frame_rate.num)
3356 ost->frame_rate = ist->framerate;
3357 if (ist && !ost->frame_rate.num)
3358 ost->frame_rate = ist->st->r_frame_rate;
3359 if (ist && !ost->frame_rate.num) {
3360 ost->frame_rate = (AVRational){25, 1};
3361 av_log(NULL, AV_LOG_WARNING,
3363 "about the input framerate is available. Falling "
3364 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3365 "if you want a different framerate.\n",
3366 ost->file_index, ost->index);
3368 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3369 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3370 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3371 ost->frame_rate = ost->enc->supported_framerates[idx];
3373 // reduce frame rate for mpeg4 to be within the spec limits
3374 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3375 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3376 ost->frame_rate.num, ost->frame_rate.den, 65535);
3380 switch (enc_ctx->codec_type) {
3381 case AVMEDIA_TYPE_AUDIO:
3382 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3384 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3385 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3386 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3387 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3388 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3389 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3391 case AVMEDIA_TYPE_VIDEO:
3392 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3393 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3394 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3395 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3396 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3397 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3398 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3400 for (j = 0; j < ost->forced_kf_count; j++)
3401 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3403 enc_ctx->time_base);
3405 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3406 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3407 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3408 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3409 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3410 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3411 if (!strncmp(ost->enc->name, "libx264", 7) &&
3412 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3413 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3414 av_log(NULL, AV_LOG_WARNING,
3415 "No pixel format specified, %s for H.264 encoding chosen.\n"
3416 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3417 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3418 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3419 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3420 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3421 av_log(NULL, AV_LOG_WARNING,
3422 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3423 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3424 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3425 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3427 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3428 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3430 ost->st->avg_frame_rate = ost->frame_rate;
3433 enc_ctx->width != dec_ctx->width ||
3434 enc_ctx->height != dec_ctx->height ||
3435 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3436 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3439 if (ost->forced_keyframes) {
3440 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3441 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3442 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3444 av_log(NULL, AV_LOG_ERROR,
3445 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3448 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3449 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3450 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3451 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3453 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3454 // parse it only for static kf timings
3455 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3456 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3460 case AVMEDIA_TYPE_SUBTITLE:
3461 enc_ctx->time_base = (AVRational){1, 1000};
3462 if (!enc_ctx->width) {
3463 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3464 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3467 case AVMEDIA_TYPE_DATA:
3475 if (ost->disposition) {
3476 static const AVOption opts[] = {
3477 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3478 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3479 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3480 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3481 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3482 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3483 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3484 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3485 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3486 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3487 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3488 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3489 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3490 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3493 static const AVClass class = {
3495 .item_name = av_default_item_name,
3497 .version = LIBAVUTIL_VERSION_INT,
3499 const AVClass *pclass = &class;
3501 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3507 /* init input streams */
3508 for (i = 0; i < nb_input_streams; i++)
3509 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3510 for (i = 0; i < nb_output_streams; i++) {
3511 ost = output_streams[i];
3512 avcodec_close(ost->enc_ctx);
3517 /* open each encoder */
3518 for (i = 0; i < nb_output_streams; i++) {
3519 ret = init_output_stream(output_streams[i], error, sizeof(error));
3524 /* discard unused programs */
3525 for (i = 0; i < nb_input_files; i++) {
3526 InputFile *ifile = input_files[i];
3527 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3528 AVProgram *p = ifile->ctx->programs[j];
3529 int discard = AVDISCARD_ALL;
3531 for (k = 0; k < p->nb_stream_indexes; k++)
3532 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3533 discard = AVDISCARD_DEFAULT;
3536 p->discard = discard;
3540 /* write headers for files with no streams */
3541 for (i = 0; i < nb_output_files; i++) {
3542 oc = output_files[i]->ctx;
3543 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3544 ret = check_init_output_file(output_files[i], i);
3551 /* dump the stream mapping */
3552 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3553 for (i = 0; i < nb_input_streams; i++) {
3554 ist = input_streams[i];
3556 for (j = 0; j < ist->nb_filters; j++) {
3557 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3558 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3559 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3560 ist->filters[j]->name);
3561 if (nb_filtergraphs > 1)
3562 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3563 av_log(NULL, AV_LOG_INFO, "\n");
3568 for (i = 0; i < nb_output_streams; i++) {
3569 ost = output_streams[i];
3571 if (ost->attachment_filename) {
3572 /* an attached file */
3573 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3574 ost->attachment_filename, ost->file_index, ost->index);
3578 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3579 /* output from a complex graph */
3580 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3581 if (nb_filtergraphs > 1)
3582 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3584 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3585 ost->index, ost->enc ? ost->enc->name : "?");
3589 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3590 input_streams[ost->source_index]->file_index,
3591 input_streams[ost->source_index]->st->index,
3594 if (ost->sync_ist != input_streams[ost->source_index])
3595 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3596 ost->sync_ist->file_index,
3597 ost->sync_ist->st->index);
3598 if (ost->stream_copy)
3599 av_log(NULL, AV_LOG_INFO, " (copy)");
3601 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3602 const AVCodec *out_codec = ost->enc;
3603 const char *decoder_name = "?";
3604 const char *in_codec_name = "?";
3605 const char *encoder_name = "?";
3606 const char *out_codec_name = "?";
3607 const AVCodecDescriptor *desc;
3610 decoder_name = in_codec->name;
3611 desc = avcodec_descriptor_get(in_codec->id);
3613 in_codec_name = desc->name;
3614 if (!strcmp(decoder_name, in_codec_name))
3615 decoder_name = "native";
3619 encoder_name = out_codec->name;
3620 desc = avcodec_descriptor_get(out_codec->id);
3622 out_codec_name = desc->name;
3623 if (!strcmp(encoder_name, out_codec_name))
3624 encoder_name = "native";
3627 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3628 in_codec_name, decoder_name,
3629 out_codec_name, encoder_name);
3631 av_log(NULL, AV_LOG_INFO, "\n");
3635 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3639 transcode_init_done = 1;
3644 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Scans every output stream; a stream no longer "needs output" when it is
 * finished or its file hit the -fs size limit, and hitting -frames closes
 * every stream of the owning output file. (Some lines of this loop are not
 * visible in this excerpt — the early-continue/return paths are elided.) */
3645 static int need_output(void)
3649 for (i = 0; i < nb_output_streams; i++) {
3650 OutputStream *ost = output_streams[i];
3651 OutputFile *of = output_files[ost->file_index];
3652 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* finished flag set elsewhere; avio_tell() checks the -fs (limit_filesize) cap */
3654 if (ost->finished ||
3655 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames reached: close ALL streams of this output file, not just this one */
3657 if (ost->frame_number >= ost->max_frames) {
3659 for (j = 0; j < of->ctx->nb_streams; j++)
3660 close_output_stream(output_streams[of->ost_index + j]);
3671 * Select the output stream to process.
/* Picks the unfinished output stream whose current muxer DTS (rescaled to a
 * common time base) is smallest, i.e. the stream lagging furthest behind. */
3673 * @return selected output stream, or NULL if none available
3675 static OutputStream *choose_output(void)
3678 int64_t opts_min = INT64_MAX;
3679 OutputStream *ost_min = NULL;
3681 for (i = 0; i < nb_output_streams; i++) {
3682 OutputStream *ost = output_streams[i];
/* streams with no DTS yet sort first (INT64_MIN) so they get serviced */
3683 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3684 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3686 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3687 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3689 if (!ost->finished && opts < opts_min) {
/* an "unavailable" stream still updates opts_min but yields NULL,
 * signalling the caller that no stream can make progress right now */
3691 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin (fd 0).
 * Silently a no-op when stdin is not a tty (tcgetattr fails). */
3697 static void set_tty_echo(int on)
3701 if (tcgetattr(0, &tty) == 0) {
3702 if (on) tty.c_lflag |= ECHO;
3703 else tty.c_lflag &= ~ECHO;
3704 tcsetattr(0, TCSANOW, &tty);
/* Poll for an interactive keypress and act on it.
 * Returns AVERROR_EXIT when the user asked to quit (or a signal was received);
 * otherwise returns normally so transcoding continues. Keys: +/- verbosity,
 * s QP histogram, c/C filter commands, d/D debug modes, ? help.
 * NOTE(review): several branches (key read, 'q' handling, returns) are elided
 * in this excerpt. */
3709 static int check_keyboard_interaction(int64_t cur_time)
3712 static int64_t last_time;
3713 if (received_nb_signals)
3714 return AVERROR_EXIT;
3715 /* read_key() returns 0 on EOF */
/* rate-limit key polling to once per 100ms; never poll when daemonized */
3716 if(cur_time - last_time >= 100000 && !run_as_daemon){
3718 last_time = cur_time;
3722 return AVERROR_EXIT;
3723 if (key == '+') av_log_set_level(av_log_get_level()+10);
3724 if (key == '-') av_log_set_level(av_log_get_level()-10);
3725 if (key == 's') qp_hist ^= 1;
3728 do_hex_dump = do_pkt_dump = 0;
3729 } else if(do_pkt_dump){
3733 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read one command line from the tty and send ('c', first matching
 * filter only) or queue ('C', all filters, with a target time) it */
3735 if (key == 'c' || key == 'C'){
3736 char buf[4096], target[64], command[256], arg[256] = {0};
3739 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3742 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3747 fprintf(stderr, "\n");
3749 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3750 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3751 target, time, command, arg);
3752 for (i = 0; i < nb_filtergraphs; i++) {
3753 FilterGraph *fg = filtergraphs[i];
3756 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3757 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3758 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3759 } else if (key == 'c') {
3760 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3761 ret = AVERROR_PATCHWELCOME;
3763 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3765 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3770 av_log(NULL, AV_LOG_ERROR,
3771 "Parse error, at least 3 arguments were expected, "
3772 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle debug modes (shift left, skipping modes that would crash);
 * 'D': prompt for an explicit numeric debug value */
3775 if (key == 'd' || key == 'D'){
3778 debug = input_streams[0]->st->codec->debug<<1;
3779 if(!debug) debug = 1;
3780 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3787 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3792 fprintf(stderr, "\n");
3793 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3794 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every decoder and encoder context */
3796 for(i=0;i<nb_input_streams;i++) {
3797 input_streams[i]->st->codec->debug = debug;
3799 for(i=0;i<nb_output_streams;i++) {
3800 OutputStream *ost = output_streams[i];
3801 ost->enc_ctx->debug = debug;
3803 if(debug) av_log_set_level(AV_LOG_DEBUG);
3804 fprintf(stderr,"debug=%d\n", debug);
3807 fprintf(stderr, "key function\n"
3808 "? show this help\n"
3809 "+ increase verbosity\n"
3810 "- decrease verbosity\n"
3811 "c Send command to first matching filter supporting it\n"
3812 "C Send/Queue command to all matching filters\n"
3813 "D cycle through available debug modes\n"
3814 "h dump packets/hex press to cycle through the 3 states\n"
3816 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On any read
 * error (including EOF) the error is stored on the queue's receive side so
 * the main thread sees it. */
3823 static void *input_thread(void *arg)
/* non-blocking sends are used for non-seekable inputs (see init_input_threads) */
3826 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3831 ret = av_read_frame(f->ctx, &pkt);
3833 if (ret == AVERROR(EAGAIN)) {
3838 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3841 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: retry (blocking, per the elided lines)
 * and warn the user to raise -thread_queue_size */
3842 if (flags && ret == AVERROR(EAGAIN)) {
3844 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3845 av_log(f->ctx, AV_LOG_WARNING,
3846 "Thread message queue blocking; consider raising the "
3847 "thread_queue_size option (current value: %d)\n",
3848 f->thread_queue_size);
/* send failed: drop the packet and propagate the error to the receiver */
3851 if (ret != AVERROR_EOF)
3852 av_log(f->ctx, AV_LOG_ERROR,
3853 "Unable to send packet to main thread: %s\n",
3855 av_packet_unref(&pkt);
3856 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and join all demuxer threads: signal EOF to each sender, drain and
 * unref any packets still queued, join the thread, then free the queue. */
3864 static void free_input_threads(void)
3868 for (i = 0; i < nb_input_files; i++) {
3869 InputFile *f = input_files[i];
/* skip files whose thread was never started */
3872 if (!f || !f->in_thread_queue)
3874 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so their buffers are not leaked */
3875 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3876 av_packet_unref(&pkt);
3878 pthread_join(f->thread, NULL);
3880 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than one
 * input). Returns 0 on success or a negative AVERROR code. */
3884 static int init_input_threads(void)
/* single input: read directly on the main thread, no threads needed */
3888 if (nb_input_files == 1)
3891 for (i = 0; i < nb_input_files; i++) {
3892 InputFile *f = input_files[i];
/* non-seekable inputs (pipes, network) — except lavfi — must not block
 * the reader thread, so their queue sends are non-blocking */
3894 if (f->ctx->pb ? !f->ctx->pb->seekable :
3895 strcmp(f->ctx->iformat->name, "lavfi"))
3896 f->non_blocking = 1;
3897 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3898 f->thread_queue_size, sizeof(AVPacket));
3902 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3903 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3904 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, not a negative AVERROR */
3905 return AVERROR(ret);
/* Receive one packet from the file's demuxer-thread queue (multi-input mode).
 * The elided condition selects non-blocking receive for some callers. */
3911 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3913 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3915 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. Honors -re style rate limiting
 * (the elided guard): if any stream's DTS is ahead of wall-clock time,
 * return EAGAIN instead of reading. With multiple inputs, packets come from
 * the per-file demuxer thread; otherwise read directly. */
3919 static int get_input_packet(InputFile *f, AVPacket *pkt)
3923 for (i = 0; i < f->nb_streams; i++) {
3924 InputStream *ist = input_streams[f->ist_index + i];
/* both values in microseconds: stream DTS vs elapsed wall clock */
3925 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3926 int64_t now = av_gettime_relative() - ist->start;
3928 return AVERROR(EAGAIN);
3933 if (nb_input_files > 1)
3934 return get_input_packet_mt(f, pkt);
3936 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. some part of the pipeline reported EAGAIN this iteration). */
3939 static int got_eagain(void)
3942 for (i = 0; i < nb_output_streams; i++)
3943 if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-stream unavailable flags so the next
 * transcode_step() retries every input/output from scratch. */
3948 static void reset_eagain(void)
3951 for (i = 0; i < nb_input_files; i++)
3952 input_files[i]->eagain = 0;
3953 for (i = 0; i < nb_output_streams; i++)
3954 output_streams[i]->unavailable = 0;
3957 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
// Uses av_compare_ts() so the two durations are compared correctly even when
// expressed in different time bases; *duration is updated in-place.
3958 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3959 AVRational time_base)
3965 return tmp_time_base;
3968 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3971 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush all
 * decoders, and compute the file's total duration (used later to offset
 * timestamps of the next loop iteration). */
3977 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3980 AVCodecContext *avctx;
3981 int i, ret, has_audio = 0;
3982 int64_t duration = 0;
3984 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush decoders and detect whether any audio was decoded */
3988 for (i = 0; i < ifile->nb_streams; i++) {
3989 ist = input_streams[ifile->ist_index + i];
3990 avctx = ist->dec_ctx;
3993 if (ist->decoding_needed) {
/* drain the decoder (NULL packet, no_eof=1) before flushing its buffers */
3994 process_input_packet(ist, NULL, 1);
3995 avcodec_flush_buffers(avctx);
3998 /* duration is the length of the last frame in a stream
3999 * when audio stream is present we don't care about
4000 * last video frame length because it's not defined exactly */
4001 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: estimate each stream's last-frame duration */
4005 for (i = 0; i < ifile->nb_streams; i++) {
4006 ist = input_streams[ifile->ist_index + i];
4007 avctx = ist->dec_ctx;
4010 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4011 AVRational sample_rate = {1, avctx->sample_rate};
4013 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: prefer the forced -r rate, then the stream's average rate,
 * else fall back to a single time-base tick */
4017 if (ist->framerate.num) {
4018 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4019 } else if (ist->st->avg_frame_rate.num) {
4020 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4021 } else duration = 1;
4023 if (!ifile->duration)
4024 ifile->time_base = ist->st->time_base;
4025 /* the total duration of the stream, max_pts - min_pts is
4026 * the duration of the stream without the last frame */
4027 duration += ist->max_pts - ist->min_pts;
4028 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: decrement remaining iterations (body elided) */
4032 if (ifile->loop > 0)
/* Read and dispatch one packet from input file file_index, applying start-time
 * correction, timestamp wrapping, ts_offset/ts_scale, loop-duration offsets and
 * discontinuity handling before handing the packet to process_input_packet(). */
4040 * - 0 -- one packet was read and processed
4041 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4042 * this function should be called again
4043 * - AVERROR_EOF -- this function should not be called again
4045 static int process_input(int file_index)
4047 InputFile *ifile = input_files[file_index];
4048 AVFormatContext *is;
4056 ret = get_input_packet(ifile, &pkt);
4058 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure, rewind the file and try once more */
4062 if (ret < 0 && ifile->loop) {
4063 if ((ret = seek_to_start(ifile, is)) < 0)
4065 ret = get_input_packet(ifile, &pkt);
4066 if (ret == AVERROR(EAGAIN)) {
4072 if (ret != AVERROR_EOF) {
4073 print_error(is->filename, ret);
/* true EOF: flush every decoder of this file and finish the outputs
 * fed directly (stream copy / subtitles) from it */
4078 for (i = 0; i < ifile->nb_streams; i++) {
4079 ist = input_streams[ifile->ist_index + i];
4080 if (ist->decoding_needed) {
4081 ret = process_input_packet(ist, NULL, 0);
4086 /* mark all outputs that don't go through lavfi as finished */
4087 for (j = 0; j < nb_output_streams; j++) {
4088 OutputStream *ost = output_streams[j];
4090 if (ost->source_index == ifile->ist_index + i &&
4091 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4092 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller keeps servicing other files */
4096 ifile->eof_reached = 1;
4097 return AVERROR(EAGAIN);
4103 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4104 is->streams[pkt.stream_index]);
4106 /* the following test is needed in case new streams appear
4107 dynamically in stream : we ignore them */
4108 if (pkt.stream_index >= ifile->nb_streams) {
4109 report_new_stream(file_index, &pkt);
4110 goto discard_packet;
4113 ist = input_streams[ifile->ist_index + pkt.stream_index];
4115 ist->data_size += pkt.size;
4119 goto discard_packet;
4121 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4122 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4127 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4128 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4129 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4130 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4131 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4132 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4133 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4134 av_ts2str(input_files[ist->file_index]->ts_offset),
4135 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4138 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4139 int64_t stime, stime2;
4140 // Correcting starttime based on the enabled streams
4141 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4142 // so we instead do it here as part of discontinuity handling
4143 if ( ist->next_dts == AV_NOPTS_VALUE
4144 && ifile->ts_offset == -is->start_time
4145 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4146 int64_t new_start_time = INT64_MAX;
4147 for (i=0; i<is->nb_streams; i++) {
4148 AVStream *st = is->streams[i];
4149 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4151 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4153 if (new_start_time > is->start_time) {
4154 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4155 ifile->ts_offset = -new_start_time;
4159 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4160 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4161 ist->wrap_correction_done = 1;
/* a DTS/PTS more than half the wrap period past start is a pre-wrap value:
 * unwrap it and keep watching for more wrapped timestamps */
4163 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4164 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4165 ist->wrap_correction_done = 0;
4167 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4168 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4169 ist->wrap_correction_done = 0;
4173 /* add the stream-global side data to the first packet */
4174 if (ist->nb_packets == 1) {
4175 if (ist->st->nb_side_data)
4176 av_packet_split_side_data(&pkt);
4177 for (i = 0; i < ist->st->nb_side_data; i++) {
4178 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* don't duplicate side data the packet already carries */
4181 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* display matrix is handled by the autorotate filter instead */
4183 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4186 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4190 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file offset, then the per-stream -itsscale factor */
4194 if (pkt.dts != AV_NOPTS_VALUE)
4195 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4196 if (pkt.pts != AV_NOPTS_VALUE)
4197 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4199 if (pkt.pts != AV_NOPTS_VALUE)
4200 pkt.pts *= ist->ts_scale;
4201 if (pkt.dts != AV_NOPTS_VALUE)
4202 pkt.dts *= ist->ts_scale;
/* inter-file discontinuity: first packet of a stream jumping far from the
 * file's last seen timestamp adjusts ts_offset (DISCONT formats only) */
4204 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4205 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4206 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4207 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4208 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4209 int64_t delta = pkt_dts - ifile->last_ts;
4210 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4211 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4212 ifile->ts_offset -= delta;
4213 av_log(NULL, AV_LOG_DEBUG,
4214 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4215 delta, ifile->ts_offset);
4216 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4217 if (pkt.pts != AV_NOPTS_VALUE)
4218 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * track min/max PTS for the next loop's duration estimate */
4222 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4223 if (pkt.pts != AV_NOPTS_VALUE) {
4224 pkt.pts += duration;
4225 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4226 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4229 if (pkt.dts != AV_NOPTS_VALUE)
4230 pkt.dts += duration;
/* intra-stream discontinuity vs the predicted next_dts: either re-offset
 * (DISCONT formats) or drop wildly invalid DTS/PTS values */
4232 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4233 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4234 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4235 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4237 int64_t delta = pkt_dts - ist->next_dts;
4238 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4239 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4240 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4241 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4242 ifile->ts_offset -= delta;
4243 av_log(NULL, AV_LOG_DEBUG,
4244 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4245 delta, ifile->ts_offset);
4246 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4247 if (pkt.pts != AV_NOPTS_VALUE)
4248 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4251 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4252 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4253 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4254 pkt.dts = AV_NOPTS_VALUE;
4256 if (pkt.pts != AV_NOPTS_VALUE){
4257 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4258 delta = pkt_pts - ist->next_dts;
4259 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4260 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4261 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4262 pkt.pts = AV_NOPTS_VALUE;
4268 if (pkt.dts != AV_NOPTS_VALUE)
4269 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4272 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4273 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4274 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4275 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4276 av_ts2str(input_files[ist->file_index]->ts_offset),
4277 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4280 sub2video_heartbeat(ist, pkt.pts);
4282 process_input_packet(ist, &pkt, 0);
4285 av_packet_unref(&pkt);
4291 * Perform a step of transcoding for the specified filter graph.
4293 * @param[in] graph filter graph to consider
4294 * @param[out] best_ist input stream where a frame would allow to continue
4295 * @return 0 for success, <0 for error
4297 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4300 int nb_requests, nb_requests_max = 0;
4301 InputFilter *ifilter;
/* ask the graph to produce output on its oldest-needing sink */
4305 ret = avfilter_graph_request_oldest(graph->graph);
4307 return reap_filters(0);
/* graph fully flushed: reap remaining frames and close all its outputs */
4309 if (ret == AVERROR_EOF) {
4310 ret = reap_filters(1);
4311 for (i = 0; i < graph->nb_outputs; i++)
4312 close_output_stream(graph->outputs[i]->ost);
4315 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the input whose buffersrc had the most failed requests —
 * feeding it is most likely to unblock the graph */
4318 for (i = 0; i < graph->nb_inputs; i++) {
4319 ifilter = graph->inputs[i];
/* inputs whose file is stalled or exhausted cannot help right now */
4321 if (input_files[ist->file_index]->eagain ||
4322 input_files[ist->file_index]->eof_reached)
4324 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4325 if (nb_requests > nb_requests_max) {
4326 nb_requests_max = nb_requests;
/* no input can make progress: mark all the graph's outputs unavailable */
4332 for (i = 0; i < graph->nb_outputs; i++)
4333 graph->outputs[i]->ost->unavailable = 1;
4339 * Run a single step of transcoding.
4341 * @return 0 for success, <0 for error
/* One scheduler iteration: pick the neediest output stream, decide which
 * input stream must supply data (via its filter graph when present), read
 * one packet from that input, then reap any frames the filters produced. */
4343 static int transcode_step(void)
4349 ost = choose_output();
4356 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtered stream: let the graph tell us which input it is starving for */
4361 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4366 av_assert0(ost->source_index >= 0);
4367 ist = input_streams[ost->source_index];
4370 ret = process_input(ist->file_index);
4371 if (ret == AVERROR(EAGAIN)) {
/* the file itself is stalled, so this output can't progress either */
4372 if (input_files[ist->file_index]->eagain)
4373 ost->unavailable = 1;
/* EOF is not an error at this level; keep draining filters */
4378 return ret == AVERROR_EOF ? 0 : ret;
4380 return reap_filters(0);
4384 * The following code is the main loop of the file converter
/* Drives the whole conversion: init, the interactive main loop, then EOF
 * flushing, trailer writing, reporting, and teardown of encoders, decoders
 * and per-stream allocations. Returns 0 on success, <0 on error. */
4386 static int transcode(void)
4389 AVFormatContext *os;
4392 int64_t timer_start;
4393 int64_t total_packets_written = 0;
4395 ret = transcode_init();
4399 if (stdin_interaction) {
4400 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4403 timer_start = av_gettime_relative();
4406 if ((ret = init_input_threads()) < 0)
/* ---- main loop: one transcode_step() per iteration until done ---- */
4410 while (!received_sigterm) {
4411 int64_t cur_time= av_gettime_relative();
4413 /* if 'q' pressed, exits */
4414 if (stdin_interaction)
4415 if (check_keyboard_interaction(cur_time) < 0)
4418 /* check if there's any stream where output is still needed */
4419 if (!need_output()) {
4420 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4424 ret = transcode_step();
4425 if (ret < 0 && ret != AVERROR_EOF) {
4427 av_strerror(ret, errbuf, sizeof(errbuf));
4429 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4433 /* dump report by using the output first video and audio streams */
4434 print_report(0, timer_start, cur_time);
4437 free_input_threads();
4440 /* at the end of stream, we must flush the decoder buffers */
4441 for (i = 0; i < nb_input_streams; i++) {
4442 ist = input_streams[i];
4443 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4444 process_input_packet(ist, NULL, 0);
4451 /* write the trailer if needed and close file */
4452 for (i = 0; i < nb_output_files; i++) {
4453 os = output_files[i]->ctx;
/* a file whose header was never written gets no trailer either */
4454 if (!output_files[i]->header_written) {
4455 av_log(NULL, AV_LOG_ERROR,
4456 "Nothing was written into output file %d (%s), because "
4457 "at least one of its streams received no packets.\n",
4461 if ((ret = av_write_trailer(os)) < 0) {
4462 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4468 /* dump report by using the first video and audio streams */
4469 print_report(1, timer_start, av_gettime_relative());
4471 /* close each encoder */
4472 for (i = 0; i < nb_output_streams; i++) {
4473 ost = output_streams[i];
4474 if (ost->encoding_needed) {
4475 av_freep(&ost->enc_ctx->stats_in);
4477 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail the run if nothing at all was muxed */
4480 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4481 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4485 /* close each decoder */
4486 for (i = 0; i < nb_input_streams; i++) {
4487 ist = input_streams[i];
4488 if (ist->decoding_needed) {
4489 avcodec_close(ist->dec_ctx);
4490 if (ist->hwaccel_uninit)
4491 ist->hwaccel_uninit(ist->dec_ctx);
4495 av_buffer_unref(&hw_device_ctx);
/* cleanup path (also reached on error via elided goto labels) */
4502 free_input_threads();
4505 if (output_streams) {
4506 for (i = 0; i < nb_output_streams; i++) {
4507 ost = output_streams[i];
/* fclose can fail on flush; report but continue tearing down */
4510 if (fclose(ost->logfile))
4511 av_log(NULL, AV_LOG_ERROR,
4512 "Error closing logfile, loss of information possible: %s\n",
4513 av_err2str(AVERROR(errno)));
4514 ost->logfile = NULL;
4516 av_freep(&ost->forced_kf_pts);
4517 av_freep(&ost->apad);
4518 av_freep(&ost->disposition);
4519 av_dict_free(&ost->encoder_opts);
4520 av_dict_free(&ost->sws_dict);
4521 av_dict_free(&ost->swr_opts);
4522 av_dict_free(&ost->resample_opts);
/* Return this process's consumed user CPU time in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows
 * (FILETIME is in 100ns units, hence /10), else falls back to
 * wall-clock time from av_gettime_relative(). */
4530 static int64_t getutime(void)
4533 struct rusage rusage;
4535 getrusage(RUSAGE_SELF, &rusage);
4536 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4537 #elif HAVE_GETPROCESSTIMES
4539 FILETIME c, e, k, u;
4540 proc = GetCurrentProcess();
4541 GetProcessTimes(proc, &c, &e, &k, &u);
4542 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4544 return av_gettime_relative();
/* Return the process's peak memory usage in bytes: ru_maxrss (reported in
 * KiB, hence *1024) where available, PeakPagefileUsage on Windows; the
 * no-support fallback branch is outside this excerpt. */
4548 static int64_t getmaxrss(void)
4550 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4551 struct rusage rusage;
4552 getrusage(RUSAGE_SELF, &rusage);
4553 return (int64_t)rusage.ru_maxrss * 1024;
4554 #elif HAVE_GETPROCESSMEMORYINFO
4556 PROCESS_MEMORY_COUNTERS memcounters;
4557 proc = GetCurrentProcess();
4558 memcounters.cb = sizeof(memcounters);
4559 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4560 return memcounters.PeakPagefileUsage;
/* Log callback that discards all messages; installed below when ffmpeg is
 * started with -d (daemon mode) to silence console logging. Body elided here. */
4566 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4570 int main(int argc, char **argv)
4577 register_exit(ffmpeg_cleanup);
4579 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4581 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4582 parse_loglevel(argc, argv, options);
4584 if(argc>1 && !strcmp(argv[1], "-d")){
4586 av_log_set_callback(log_callback_null);
4591 avcodec_register_all();
4593 avdevice_register_all();
4595 avfilter_register_all();
4597 avformat_network_init();
4599 show_banner(argc, argv, options);
4601 /* parse options and open all input/output files */
4602 ret = ffmpeg_parse_options(argc, argv);
4606 if (nb_output_files <= 0 && nb_input_files == 0) {
4608 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4612 /* file converter / grab */
4613 if (nb_output_files <= 0) {
4614 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4618 // if (nb_input_files == 0) {
4619 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4623 for (i = 0; i < nb_output_files; i++) {
4624 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4628 current_time = ti = getutime();
4629 if (transcode() < 0)
4631 ti = getutime() - ti;
4633 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4635 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4636 decode_error_stat[0], decode_error_stat[1]);
4637 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4640 exit_program(received_nb_signals ? 255 : main_return_code);
4641 return main_return_code;