2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Per-frame video statistics log (-vstats); opened lazily in do_video_stats(). */
112 static FILE *vstats_file;
/* Names of the variables usable in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Global counters of frames duplicated/dropped by video sync (do_video_out). */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int want_sdp = 1;
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Shared scratch buffer for encoded subtitles; allocated on demand in
 * do_subtitle_out(), freed in ffmpeg_cleanup(). */
137 static uint8_t *subtitle_out;
/* Global tables of input/output streams and files; the nb_* counters track
 * the array sizes. */
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
149 FilterGraph **filtergraphs;
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
/* Non-zero once oldtty holds saved terminal state to restore on exit. */
156 static int restore_tty;
160 static void free_input_threads(void);
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank RGB32 canvas for rendering
 * subtitles as video frames.  Uses the decoder's reported size when
 * available, otherwise the preconfigured sub2video.w/h.
 * Returns 0 on success, <0 on error (error paths partly elided here). */
168 static int sub2video_get_blank_frame(InputStream *ist)
171 AVFrame *frame = ist->sub2video.frame;
173 av_frame_unref(frame);
174 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
176 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte buffer alignment. */
177 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Clear the canvas to all-zero bytes (zero alpha in RGB32). */
179 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle into the RGB32 canvas at dst,
 * expanding 8-bit palette indices through the rectangle's palette
 * (r->data[1]).  Non-bitmap rectangles and rectangles that do not fit
 * inside the w x h canvas are skipped with a warning. */
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186 uint32_t *pal, *dst2;
190 if (r->type != SUBTITLE_BITMAP) {
191 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196 r->x, r->y, r->w, r->h, w, h
/* Advance dst to the rectangle's top-left corner (4 bytes per pixel). */
201 dst += r->y * dst_linesize + r->x * 4;
203 pal = (uint32_t *)r->data[1];
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
/* Palette lookup: 8-bit index -> 32-bit pixel value. */
208 *(dst2++) = pal[*(src2++)];
210 src += r->linesize[0];
/* Send the current sub2video canvas, stamped with the given pts, into every
 * filter fed by this input stream.  KEEP_REF keeps our reference so the same
 * frame can be re-pushed by later heartbeats; PUSH requests immediate
 * processing. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/* Render the given subtitle onto the sub2video canvas and push it into the
 * filter graph.  Called with sub == NULL to refresh/blank the canvas (from
 * the heartbeat and flush paths). */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 AVFrame *frame = ist->sub2video.frame;
233 int64_t pts, end_pts;
/* Convert display times (millisecond offsets from sub->pts, which is in
 * AV_TIME_BASE units) into the input stream's time base. */
238 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 num_rects = sub->num_rects;
/* NOTE(review): this assignment appears to belong to the sub == NULL
 * branch (heartbeat refresh); surrounding lines are elided. */
244 pts = ist->sub2video.end_pts;
248 if (sub2video_get_blank_frame(ist) < 0) {
249 av_log(ist->dec_ctx, AV_LOG_ERROR,
250 "Impossible to get a blank canvas.\n");
253 dst = frame->data [0];
254 dst_linesize = frame->linesize[0];
/* Blit every rectangle of the subtitle onto the fresh canvas. */
255 for (i = 0; i < num_rects; i++)
256 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257 sub2video_push_ref(ist, pts);
258 ist->sub2video.end_pts = end_pts;
/* Keep sibling sub2video streams alive: whenever a frame is read from the
 * file containing ist, re-send the current subtitle canvas on the other
 * sub2video streams of the same file so downstream filters do not stall. */
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 InputFile *infile = input_files[ist->file_index];
267 /* When a frame is read from a file, examine all sub2video streams in
268 the same file and send the sub2video frame again. Otherwise, decoded
269 video frames could be accumulating in the filter graph while a filter
270 (possibly overlay) is desperately waiting for a subtitle frame. */
271 for (i = 0; i < infile->nb_streams; i++) {
272 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video streams. */
273 if (!ist2->sub2video.frame)
275 /* subtitles seem to be usually muxed ahead of other streams;
276 if not, subtracting a larger time here is necessary */
277 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278 /* do not send the heartbeat frame if the subtitle is already ahead */
279 if (pts2 <= ist2->sub2video.last_pts)
/* Redraw the canvas if the current subtitle expired or none was drawn yet. */
281 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282 sub2video_update(ist2, NULL);
/* Count failed frame requests downstream; only re-push when a filter is
 * actually starving. */
283 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286 sub2video_push_ref(ist2, pts2);
/* End of stream for a sub2video input: emit a final blank canvas if a
 * subtitle is still displayed, then send NULL (EOF) to every connected
 * buffer source. */
290 static void sub2video_flush(InputStream *ist)
294 if (ist->sub2video.end_pts < INT64_MAX)
295 sub2video_update(ist, NULL);
296 for (i = 0; i < ist->nb_filters; i++)
297 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
300 /* end of sub2video hack */
/* Restore the saved terminal attributes.  Only async-signal-safe calls are
 * allowed here, as this may run from a signal handler. */
302 static void term_exit_sigsafe(void)
306 tcsetattr (0, TCSANOW, &oldtty);
/* NOTE(review): this av_log line appears to belong to a separate wrapper
 * (term_exit() upstream) whose header is elided from this view. */
312 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping.  Volatile: written from signal handlers, read
 * from the main loop. */
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
/* Set once transcode initialization has completed; see decode_interrupt_cb(). */
318 static volatile int transcode_init_done = 0;
/* Set when the process has finished cleanup; used by the Windows CtrlHandler. */
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
/* POSIX signal handler: record which signal arrived and how many were
 * received; after more than three signals, hard-exit (the message is written
 * with write(), which is async-signal-safe, unlike av_log). */
323 sigterm_handler(int sig)
325 received_sigterm = sig;
326 received_nb_signals++;
328 if(received_nb_signals > 3) {
329 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330 strlen("Received > 3 system signals, hard exiting\n"));
336 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * signals used on POSIX (Ctrl-C/Break -> SIGINT, close/logoff/shutdown ->
 * SIGTERM). */
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
344 case CTRL_BREAK_EVENT:
345 sigterm_handler(SIGINT);
348 case CTRL_CLOSE_EVENT:
349 case CTRL_LOGOFF_EVENT:
350 case CTRL_SHUTDOWN_EVENT:
351 sigterm_handler(SIGTERM);
352 /* Basically, with these 3 events, when we return from this method the
353 process is hard terminated, so stall as long as we need to
354 to try and let the main thread(s) clean up and gracefully terminate
355 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait until the main thread signals completed cleanup. */
356 while (!ffmpeg_exited) {
362 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Terminal/signal setup (part of term_init() upstream; the function header
 * is not visible in this view).  Puts the controlling tty into a raw-ish
 * mode for single-key interaction and installs sigterm_handler for the
 * usual termination signals. */
371 if (!run_as_daemon && stdin_interaction) {
373 if (tcgetattr (0, &tty) == 0) {
/* Disable canonical mode, echo and most special input processing. */
377 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
378 |INLCR|IGNCR|ICRNL|IXON);
379 tty.c_oflag |= OPOST;
380 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
381 tty.c_cflag &= ~(CSIZE|PARENB);
386 tcsetattr (0, TCSANOW, &tty);
388 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
392 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
393 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
395 signal(SIGXCPU, sigterm_handler);
397 #if HAVE_SETCONSOLECTRLHANDLER
398 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
402 /* read a key without blocking */
403 static int read_key(void)
/* POSIX path: poll stdin with select(). */
415 n = select(1, &rfds, NULL, NULL, &tv);
/* Windows path: PeekNamedPipe is needed when stdin is a pipe rather than a
 * console (e.g. when ffmpeg runs under a GUI). */
424 # if HAVE_PEEKNAMEDPIPE
426 static HANDLE input_handle;
429 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for pipes, so its failure identifies a pipe. */
430 is_pipe = !GetConsoleMode(input_handle, &dw);
434 /* When running under a GUI, you will end here. */
435 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
436 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once enough signals arrived.
 * Before init completes (transcode_init_done == 0) a single signal aborts;
 * after init a second signal is required. */
454 static int decode_interrupt_cb(void *ctx)
456 return received_nb_signals > transcode_init_done;
/* Shared interrupt callback installed on all AVIO/format contexts. */
459 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Release every global resource before process exit: filter graphs, output
 * files/streams, input files/streams, the vstats file and the global stream
 * tables; finally print the exit status message. */
461 static void ffmpeg_cleanup(int ret)
466 int maxrss = getmaxrss() / 1024;
467 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free all filter graphs, including per-input/output names. */
470 for (i = 0; i < nb_filtergraphs; i++) {
471 FilterGraph *fg = filtergraphs[i];
472 avfilter_graph_free(&fg->graph);
473 for (j = 0; j < fg->nb_inputs; j++) {
474 av_freep(&fg->inputs[j]->name);
475 av_freep(&fg->inputs[j]);
477 av_freep(&fg->inputs);
478 for (j = 0; j < fg->nb_outputs; j++) {
479 av_freep(&fg->outputs[j]->name);
480 av_freep(&fg->outputs[j]);
482 av_freep(&fg->outputs);
483 av_freep(&fg->graph_desc);
485 av_freep(&filtergraphs[i]);
487 av_freep(&filtergraphs);
489 av_freep(&subtitle_out);
/* Close and free all output format contexts and their option dicts. */
492 for (i = 0; i < nb_output_files; i++) {
493 OutputFile *of = output_files[i];
498 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
500 avformat_free_context(s);
501 av_dict_free(&of->opts);
503 av_freep(&output_files[i]);
/* Free per-output-stream state: bitstream filters, cached frames,
 * encoder context, and any packets still queued for muxing. */
505 for (i = 0; i < nb_output_streams; i++) {
506 OutputStream *ost = output_streams[i];
511 for (j = 0; j < ost->nb_bitstream_filters; j++)
512 av_bsf_free(&ost->bsf_ctx[j]);
513 av_freep(&ost->bsf_ctx);
514 av_freep(&ost->bsf_extradata_updated);
516 av_frame_free(&ost->filtered_frame);
517 av_frame_free(&ost->last_frame);
518 av_dict_free(&ost->encoder_opts);
520 av_parser_close(ost->parser);
521 avcodec_free_context(&ost->parser_avctx);
523 av_freep(&ost->forced_keyframes);
524 av_expr_free(ost->forced_keyframes_pexpr);
525 av_freep(&ost->avfilter);
526 av_freep(&ost->logfile_prefix);
528 av_freep(&ost->audio_channels_map);
529 ost->audio_channels_mapped = 0;
531 av_dict_free(&ost->sws_dict);
533 avcodec_free_context(&ost->enc_ctx);
534 avcodec_parameters_free(&ost->ref_par);
/* Drain the muxing queue, unreferencing each pending packet. */
536 while (av_fifo_size(ost->muxing_queue)) {
538 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
539 av_packet_unref(&pkt);
541 av_fifo_free(ost->muxing_queue);
543 av_freep(&output_streams[i]);
546 free_input_threads();
548 for (i = 0; i < nb_input_files; i++) {
549 avformat_close_input(&input_files[i]->ctx);
550 av_freep(&input_files[i]);
552 for (i = 0; i < nb_input_streams; i++) {
553 InputStream *ist = input_streams[i];
555 av_frame_free(&ist->decoded_frame);
556 av_frame_free(&ist->filter_frame);
557 av_dict_free(&ist->decoder_opts);
558 avsubtitle_free(&ist->prev_sub.subtitle);
559 av_frame_free(&ist->sub2video.frame);
560 av_freep(&ist->filters);
561 av_freep(&ist->hwaccel_device);
562 av_freep(&ist->dts_buffer);
564 avcodec_free_context(&ist->dec_ctx);
566 av_freep(&input_streams[i]);
/* fclose() is checked: a failed final flush could lose buffered stats. */
570 if (fclose(vstats_file))
571 av_log(NULL, AV_LOG_ERROR,
572 "Error closing vstats file, loss of information possible: %s\n",
573 av_err2str(AVERROR(errno)));
575 av_freep(&vstats_filename);
577 av_freep(&input_streams);
578 av_freep(&input_files);
579 av_freep(&output_streams);
580 av_freep(&output_files);
584 avformat_network_deinit();
/* Final status: distinguish signal-triggered exit from conversion failure. */
586 if (received_sigterm) {
587 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
588 (int) received_sigterm);
589 } else if (ret && transcode_init_done) {
590 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
596 void remove_avoptions(AVDictionary **a, AVDictionary *b)
598 AVDictionaryEntry *t = NULL;
600 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
601 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * libraries (the exit path after the log is elided from this view). */
605 void assert_avoptions(AVDictionary *m)
607 AVDictionaryEntry *t;
/* Any remaining entry means the user passed an option nothing recognized. */
608 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
609 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
614 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the CPU time consumed since the previous call,
 * labelled with the printf-style description in fmt (fmt == NULL resets the
 * reference point without logging; surrounding lines elided). */
619 static void update_benchmark(const char *fmt, ...)
621 if (do_benchmark_all) {
622 int64_t t = getutime();
628 vsnprintf(buf, sizeof(buf), fmt, va);
630 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
636 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
639 for (i = 0; i < nb_output_streams; i++) {
640 OutputStream *ost2 = output_streams[i];
641 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one encoded packet to the muxer for stream ost: buffer it while the
 * muxer header is not yet written, sanitize timestamps, enforce monotonically
 * increasing DTS, then interleave-write it.  Consumes/unrefs pkt. */
645 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
647 AVFormatContext *s = of->ctx;
648 AVStream *st = ost->st;
/* Muxer not initialized yet: queue the packet, growing the FIFO up to
 * ost->max_muxing_queue_size entries. */
651 if (!of->header_written) {
653 /* the muxer is not initialized yet, buffer the packet */
654 if (!av_fifo_space(ost->muxing_queue)) {
655 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
656 ost->max_muxing_queue_size);
657 if (new_size <= av_fifo_size(ost->muxing_queue)) {
658 av_log(NULL, AV_LOG_ERROR,
659 "Too many packets buffered for output stream %d:%d.\n",
660 ost->file_index, ost->st->index);
663 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Transfer ownership of the packet data into the queue. */
667 av_packet_move_ref(&tmp_pkt, pkt);
668 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop (video) or negative -async (audio): discard timestamps. */
672 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
673 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
674 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
677 * Audio encoders may split the packets -- #frames in != #packets out.
678 * But there is no reordering, so we can limit the number of output packets
679 * by simply dropping them here.
680 * Counting encoded video frames needs to be done separately because of
681 * reordering, see do_video_out()
683 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
684 if (ost->frame_number >= ost->max_frames) {
685 av_packet_unref(pkt);
/* Pull encoder quality/PSNR side data into the stream for stats display. */
690 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
692 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
694 ost->quality = sd ? AV_RL32(sd) : -1;
695 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
697 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
699 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration is dictated by the configured frame rate. */
704 if (ost->frame_rate.num && ost->is_cfr) {
705 if (pkt->duration > 0)
706 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
707 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
/* Timestamp sanity checks, skipped for formats without timestamps. */
712 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
713 if (pkt->dts != AV_NOPTS_VALUE &&
714 pkt->pts != AV_NOPTS_VALUE &&
715 pkt->dts > pkt->pts) {
716 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
718 ost->file_index, ost->st->index);
/* sum - min - max == median of {pts, dts, last_mux_dts + 1}. */
720 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
721 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
722 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless the muxer allows equal timestamps) increasing
 * DTS; VP9 stream copy is exempted. */
724 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
725 pkt->dts != AV_NOPTS_VALUE &&
726 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
727 ost->last_mux_dts != AV_NOPTS_VALUE) {
728 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
729 if (pkt->dts < max) {
730 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
731 av_log(s, loglevel, "Non-monotonous DTS in output stream "
732 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
733 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
735 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
738 av_log(s, loglevel, "changing to %"PRId64". This may result "
739 "in incorrect timestamps in the output file.\n",
741 if (pkt->pts >= pkt->dts)
742 pkt->pts = FFMAX(pkt->pts, max);
747 ost->last_mux_dts = pkt->dts;
749 ost->data_size += pkt->size;
750 ost->packets_written++;
752 pkt->stream_index = ost->index;
/* Debug trace of what is handed to the muxer (enabled via -debug_ts). */
755 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
756 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
757 av_get_media_type_string(ost->enc_ctx->codec_type),
758 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
759 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
764 ret = av_interleaved_write_frame(s, pkt);
/* A write error finishes all streams of this file instead of aborting. */
766 print_error("av_interleaved_write_frame()", ret);
767 main_return_code = 1;
768 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
770 av_packet_unref(pkt);
/* Mark this stream's encoder as finished and clamp the output file's
 * recording time to the stream's current end position (the guarding
 * condition around the clamp is elided from this view). */
773 static void close_output_stream(OutputStream *ost)
775 OutputFile *of = output_files[ost->file_index];
777 ost->finished |= ENCODER_FINISHED;
/* Stream end in AV_TIME_BASE units, relative to its first pts. */
779 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
780 of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through the stream's bitstream filter chain (if any) and
 * hand each resulting packet to write_packet(); without filters the packet
 * goes straight to the muxer. */
784 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
788 /* apply the output bitstream filters, if any */
789 if (ost->nb_bitstream_filters) {
/* Feed the original packet into the first filter of the chain. */
792 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
798 /* get a packet from the previous filter up the chain */
799 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
800 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
801 * the api states this shouldn't happen after init(). Propagate it here to the
802 * muxer and to the next filters in the chain to workaround this.
803 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
804 * par_out->extradata and adapt muxers accordingly to get rid of this. */
805 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
806 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
809 ost->bsf_extradata_updated[idx - 1] |= 1;
811 if (ret == AVERROR(EAGAIN)) {
818 /* send it to the next filter down the chain or to the muxer */
819 if (idx < ost->nb_bitstream_filters) {
820 /* HACK/FIXME! - See above */
821 if (!(ost->bsf_extradata_updated[idx] & 2)) {
822 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
825 ost->bsf_extradata_updated[idx] |= 2;
827 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
832 write_packet(of, pkt, ost);
/* No bitstream filters configured: mux the packet directly. */
835 write_packet(of, pkt, ost);
/* EOF from the filter chain is expected; anything else is an error. */
838 if (ret < 0 && ret != AVERROR_EOF) {
839 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
840 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Check whether the stream reached the output file's -t recording time; if
 * so, close the stream.  Callers treat a zero return as "stop encoding"
 * (the return statements themselves are elided from this view). */
846 static int check_recording_time(OutputStream *ost)
848 OutputFile *of = output_files[ost->file_index];
850 if (of->recording_time != INT64_MAX &&
/* Compare stream position (relative to first pts) against recording_time,
 * which is in AV_TIME_BASE units. */
851 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
852 AV_TIME_BASE_Q) >= 0) {
853 close_output_stream(ost);
/* Encode one audio frame with the send/receive API and mux every packet the
 * encoder produces.  ost->sync_opts advances in samples. */
859 static void do_audio_out(OutputFile *of, OutputStream *ost,
862 AVCodecContext *enc = ost->enc_ctx;
866 av_init_packet(&pkt);
/* Stop once the -t recording time for this stream is reached. */
870 if (!check_recording_time(ost))
/* Without a usable pts (or with negative -async), stamp from sync_opts. */
873 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
874 frame->pts = ost->sync_opts;
875 ost->sync_opts = frame->pts + frame->nb_samples;
876 ost->samples_encoded += frame->nb_samples;
877 ost->frames_encoded++;
879 av_assert0(pkt.size || !pkt.data);
880 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
882 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
883 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
884 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
885 enc->time_base.num, enc->time_base.den);
888 ret = avcodec_send_frame(enc, frame);
/* Drain every packet the encoder has ready. */
893 ret = avcodec_receive_packet(enc, &pkt);
894 if (ret == AVERROR(EAGAIN))
899 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Convert packet timestamps from encoder to stream time base. */
901 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
904 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
905 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
906 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
907 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
910 output_packet(of, &pkt, ost);
915 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle and mux it.  DVB subtitles are encoded twice: one
 * packet to draw the subtitle and a second one to clear it. */
919 static void do_subtitle_out(OutputFile *of,
924 int subtitle_out_max_size = 1024 * 1024;
925 int subtitle_out_size, nb, i;
930 if (sub->pts == AV_NOPTS_VALUE) {
931 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB output buffer (freed in ffmpeg_cleanup). */
940 subtitle_out = av_malloc(subtitle_out_max_size)
942 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
947 /* Note: DVB subtitle need one packet to draw them and one other
948 packet to clear them */
949 /* XXX: signal it in the codec context ? */
950 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
955 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
957 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
958 pts -= output_files[ost->file_index]->start_time;
/* nb is 1, or 2 for the DVB draw+clear pair. */
959 for (i = 0; i < nb; i++) {
960 unsigned save_num_rects = sub->num_rects;
962 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
963 if (!check_recording_time(ost))
967 // start_display_time is required to be 0
968 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
969 sub->end_display_time -= sub->start_display_time;
970 sub->start_display_time = 0;
974 ost->frames_encoded++;
976 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
977 subtitle_out_max_size, sub);
/* Restore the rect count saved above (it may have been altered for this
 * pass of the loop). */
979 sub->num_rects = save_num_rects;
980 if (subtitle_out_size < 0) {
981 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
985 av_init_packet(&pkt);
986 pkt.data = subtitle_out;
987 pkt.size = subtitle_out_size;
988 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
989 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
990 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
991 /* XXX: the pts correction is handled here. Maybe handling
992 it in the codec would be better */
/* 90 kHz ticks: the draw packet uses start_display_time, the clear packet
 * uses end_display_time. */
994 pkt.pts += 90 * sub->start_display_time;
996 pkt.pts += 90 * sub->end_display_time;
999 output_packet(of, &pkt, ost);
/* Video sync + encode: decide from the chosen vsync method how many times
 * the incoming frame (and possibly the previous frame) must be emitted,
 * then encode each emission and mux the resulting packets.  next_picture
 * may be NULL on flush. */
1003 static void do_video_out(OutputFile *of,
1005 AVFrame *next_picture,
1008 int ret, format_video_sync;
1010 AVCodecContext *enc = ost->enc_ctx;
1011 AVCodecParameters *mux_par = ost->st->codecpar;
1012 int nb_frames, nb0_frames, i;
1013 double delta, delta0;
1014 double duration = 0;
1016 InputStream *ist = NULL;
1017 AVFilterContext *filter = ost->filter->filter;
1019 if (ost->source_index >= 0)
1020 ist = input_streams[ost->source_index];
/* Estimate the frame duration in encoder time-base units, preferring the
 * filter's frame rate, then the output frame rate, then the input packet
 * duration. */
1022 if (filter->inputs[0]->frame_rate.num > 0 &&
1023 filter->inputs[0]->frame_rate.den > 0)
1024 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1026 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1027 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1029 if (!ost->filters_script &&
1033 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1034 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush (no new picture): predict the duplication count from the history
 * of the last three values. */
1037 if (!next_picture) {
1039 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1040 ost->last_nb0_frames[1],
1041 ost->last_nb0_frames[2]);
1043 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1044 delta = delta0 + duration;
1046 /* by default, we output a single frame */
1047 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve -vsync auto to a concrete method based on the output format. */
1050 format_video_sync = video_sync_method;
1051 if (format_video_sync == VSYNC_AUTO) {
1052 if(!strcmp(of->ctx->oformat->name, "avi")) {
1053 format_video_sync = VSYNC_VFR;
1055 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1057 && format_video_sync == VSYNC_CFR
1058 && input_files[ist->file_index]->ctx->nb_streams == 1
1059 && input_files[ist->file_index]->input_ts_offset == 0) {
1060 format_video_sync = VSYNC_VSCFR;
1062 if (format_video_sync == VSYNC_CFR && copy_ts) {
1063 format_video_sync = VSYNC_VSCFR;
1066 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Handle frames arriving before their slot (negative drift). */
1070 format_video_sync != VSYNC_PASSTHROUGH &&
1071 format_video_sync != VSYNC_DROP) {
1072 if (delta0 < -0.6) {
1073 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1075 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1076 sync_ipts = ost->sync_opts;
/* Per-method decision of how many frames to emit (nb_frames) and how many
 * of them repeat the previous picture (nb0_frames). */
1081 switch (format_video_sync) {
1083 if (ost->frame_number == 0 && delta0 >= 0.5) {
1084 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1087 ost->sync_opts = lrint(sync_ipts);
1090 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1091 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1093 } else if (delta < -1.1)
1095 else if (delta > 1.1) {
1096 nb_frames = lrintf(delta);
1098 nb0_frames = lrintf(delta0 - 0.6);
1104 else if (delta > 0.6)
1105 ost->sync_opts = lrint(sync_ipts);
1108 case VSYNC_PASSTHROUGH:
1109 ost->sync_opts = lrint(sync_ipts);
/* Never exceed -frames and keep the dup count within the emit count. */
1116 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1117 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the duplication history window. */
1119 memmove(ost->last_nb0_frames + 1,
1120 ost->last_nb0_frames,
1121 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1122 ost->last_nb0_frames[0] = nb0_frames;
1124 if (nb0_frames == 0 && ost->last_dropped) {
1126 av_log(NULL, AV_LOG_VERBOSE,
1127 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1128 ost->frame_number, ost->st->index, ost->last_frame->pts);
1130 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1131 if (nb_frames > dts_error_threshold * 30) {
1132 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1136 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1137 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1139 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1141 /* duplicates frame if needed */
1142 for (i = 0; i < nb_frames; i++) {
1143 AVFrame *in_picture;
1144 av_init_packet(&pkt);
/* The first nb0_frames emissions repeat the previous frame. */
1148 if (i < nb0_frames && ost->last_frame) {
1149 in_picture = ost->last_frame;
1151 in_picture = next_picture;
1156 in_picture->pts = ost->sync_opts;
1159 if (!check_recording_time(ost))
1161 if (ost->frame_number >= ost->max_frames)
/* Deprecated raw-picture path: write the AVPicture struct directly. */
1165 #if FF_API_LAVF_FMT_RAWPICTURE
1166 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1167 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1168 /* raw pictures are written as AVPicture structure to
1169 avoid any copies. We support temporarily the older
1171 if (in_picture->interlaced_frame)
1172 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1174 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1175 pkt.data = (uint8_t *)in_picture;
1176 pkt.size = sizeof(AVPicture);
1177 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1178 pkt.flags |= AV_PKT_FLAG_KEY;
1180 output_packet(of, &pkt, ost);
1184 int forced_keyframe = 0;
/* Propagate interlacing configuration to the frame and muxer. */
1187 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1188 ost->top_field_first >= 0)
1189 in_picture->top_field_first = !!ost->top_field_first;
1191 if (in_picture->interlaced_frame) {
1192 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1193 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1195 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1197 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1199 in_picture->quality = enc->global_quality;
1200 in_picture->pict_type = 0;
/* Decide whether this frame must be a forced keyframe: explicit pts list,
 * -force_key_frames expression, or "source" keyframe copying. */
1202 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1203 in_picture->pts * av_q2d(enc->time_base) : NAN;
1204 if (ost->forced_kf_index < ost->forced_kf_count &&
1205 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1206 ost->forced_kf_index++;
1207 forced_keyframe = 1;
1208 } else if (ost->forced_keyframes_pexpr) {
1210 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1211 res = av_expr_eval(ost->forced_keyframes_pexpr,
1212 ost->forced_keyframes_expr_const_values, NULL);
1213 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1214 ost->forced_keyframes_expr_const_values[FKF_N],
1215 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1216 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1217 ost->forced_keyframes_expr_const_values[FKF_T],
1218 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1221 forced_keyframe = 1;
1222 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1223 ost->forced_keyframes_expr_const_values[FKF_N];
1224 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1225 ost->forced_keyframes_expr_const_values[FKF_T];
1226 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1229 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1230 } else if ( ost->forced_keyframes
1231 && !strncmp(ost->forced_keyframes, "source", 6)
1232 && in_picture->key_frame==1) {
1233 forced_keyframe = 1;
1236 if (forced_keyframe) {
1237 in_picture->pict_type = AV_PICTURE_TYPE_I;
1238 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1241 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder. */
1243 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1244 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1245 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1246 enc->time_base.num, enc->time_base.den);
1249 ost->frames_encoded++;
1251 ret = avcodec_send_frame(enc, in_picture);
/* Drain every packet the encoder has ready. */
1256 ret = avcodec_receive_packet(enc, &pkt);
1257 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1258 if (ret == AVERROR(EAGAIN))
1264 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1265 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1266 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1267 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may omit pts; fall back to sync_opts. */
1270 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1271 pkt.pts = ost->sync_opts;
1273 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1276 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1277 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1278 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1279 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1282 frame_size = pkt.size;
1283 output_packet(of, &pkt, ost);
1285 /* if two pass, output log */
1286 if (ost->logfile && enc->stats_out) {
1287 fprintf(ost->logfile, "%s", enc->stats_out);
1293 * For video, number of frames in == number of packets out.
1294 * But there may be reordering, so we can't throw away frames on encoder
1295 * flush, we need to limit them here, before they go into encoder.
1297 ost->frame_number++;
1299 if (vstats_filename && frame_size)
1300 do_video_stats(ost, frame_size);
/* Remember the frame just emitted for possible duplication next call. */
1303 if (!ost->last_frame)
1304 ost->last_frame = av_frame_alloc();
1305 av_frame_unref(ost->last_frame);
1306 if (next_picture && ost->last_frame)
1307 av_frame_ref(ost->last_frame, next_picture);
1309 av_frame_free(&ost->last_frame);
1313 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Map a normalized mean-squared error (scaled so that 1.0 is the maximum
 * possible error) to a PSNR value in dB. Caller must ensure d > 0. */
1317 static double psnr(double d)
1319 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, frame size, timestamp, bitrates, picture type) to the file
 * named by -vstats_file. Called once per encoded video frame/packet. */
1322 static void do_video_stats(OutputStream *ost, int frame_size)
1324 AVCodecContext *enc;
1326 double ti1, bitrate, avg_bitrate;
1328 /* this is executed just the first time do_video_stats is called */
1330 vstats_file = fopen(vstats_filename, "w");
1338 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1339 frame_number = ost->st->nb_frames;
1340 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1341 ost->quality / (float)FF_QP2LAMBDA)
1343 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1344 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1346 fprintf(vstats_file,"f_size= %6d ", frame_size);
1347 /* compute pts value */
1348 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time_base tick */
1352 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1353 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1354 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1355 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1356 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this output stream — and every other stream of the same output file —
 * as completely finished (both encoder and muxer side), so no further packets
 * are produced or accepted for that file. */
1360 static void finish_output_stream(OutputStream *ost)
1362 OutputFile *of = output_files[ost->file_index];
1365 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1368 for (i = 0; i < of->ctx->nb_streams; i++)
1369 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
1374 * Get and encode new output from any of the filtergraphs, without causing
1377 * @return 0 for success, <0 for severe errors
 */
1379 static int reap_filters(int flush)
1381 AVFrame *filtered_frame = NULL;
1384 /* Reap all buffers present in the buffer sinks */
1385 for (i = 0; i < nb_output_streams; i++) {
1386 OutputStream *ost = output_streams[i];
1387 OutputFile *of = output_files[ost->file_index];
1388 AVFilterContext *filter;
1389 AVCodecContext *enc = ost->enc_ctx;
1394 filter = ost->filter->filter;
/* lazily allocate the reusable per-stream frame used to receive sink output */
1396 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1397 return AVERROR(ENOMEM);
1399 filtered_frame = ost->filtered_frame;
1402 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1403 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1404 AV_BUFFERSINK_FLAG_NO_REQUEST);
1406 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1407 av_log(NULL, AV_LOG_WARNING,
1408 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1409 } else if (flush && ret == AVERROR_EOF) {
/* on flush, push a NULL frame so the video path can drain duplicated frames */
1410 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1411 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1415 if (ost->finished) {
1416 av_frame_unref(filtered_frame);
/* rescale the frame pts into the encoder time base; float_pts keeps extra
 * fractional precision by temporarily widening the time base denominator */
1419 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1420 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1421 AVRational tb = enc->time_base;
1422 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1424 tb.den <<= extra_bits;
1426 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1427 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1428 float_pts /= 1 << extra_bits;
1429 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1430 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1432 filtered_frame->pts =
1433 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1434 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1436 //if (ost->source_index >= 0)
1437 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame to the matching encoder path by media type */
1439 switch (filter->inputs[0]->type) {
1440 case AVMEDIA_TYPE_VIDEO:
1441 if (!ost->frame_aspect_ratio.num)
1442 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1445 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1446 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1448 enc->time_base.num, enc->time_base.den);
1451 do_video_out(of, ost, filtered_frame, float_pts);
1453 case AVMEDIA_TYPE_AUDIO:
1454 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1455 enc->channels != av_frame_get_channels(filtered_frame)) {
1456 av_log(NULL, AV_LOG_ERROR,
1457 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1460 do_audio_out(of, ost, filtered_frame);
1463 // TODO support subtitle filters
1467 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte totals, muxing overhead
 * percentage, and (at verbose level) per-input/output stream packet and frame
 * statistics. Warns if nothing at all was encoded. */
1474 static void print_final_stats(int64_t total_size)
1476 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1477 uint64_t subtitle_size = 0;
1478 uint64_t data_size = 0;
1479 float percent = -1.0;
/* accumulate encoded payload sizes per media type across all output streams */
1483 for (i = 0; i < nb_output_streams; i++) {
1484 OutputStream *ost = output_streams[i];
1485 switch (ost->enc_ctx->codec_type) {
1486 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1487 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1488 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1489 default: other_size += ost->data_size; break;
1491 extra_size += ost->enc_ctx->extradata_size;
1492 data_size += ost->data_size;
1493 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1494 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw encoded payload */
1498 if (data_size && total_size>0 && total_size >= data_size)
1499 percent = 100.0 * (total_size - data_size) / data_size;
1501 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1502 video_size / 1024.0,
1503 audio_size / 1024.0,
1504 subtitle_size / 1024.0,
1505 other_size / 1024.0,
1506 extra_size / 1024.0);
1508 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1510 av_log(NULL, AV_LOG_INFO, "unknown");
1511 av_log(NULL, AV_LOG_INFO, "\n");
1513 /* print verbose per-stream stats */
1514 for (i = 0; i < nb_input_files; i++) {
1515 InputFile *f = input_files[i];
1516 uint64_t total_packets = 0, total_size = 0;
1518 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1519 i, f->ctx->filename);
1521 for (j = 0; j < f->nb_streams; j++) {
1522 InputStream *ist = input_streams[f->ist_index + j];
1523 enum AVMediaType type = ist->dec_ctx->codec_type;
1525 total_size += ist->data_size;
1526 total_packets += ist->nb_packets;
1528 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1529 i, j, media_type_string(type));
1530 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1531 ist->nb_packets, ist->data_size);
1533 if (ist->decoding_needed) {
1534 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1535 ist->frames_decoded);
1536 if (type == AVMEDIA_TYPE_AUDIO)
1537 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1538 av_log(NULL, AV_LOG_VERBOSE, "; ");
1541 av_log(NULL, AV_LOG_VERBOSE, "\n");
1544 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1545 total_packets, total_size);
/* symmetric per-output-file report: packets muxed and frames encoded */
1548 for (i = 0; i < nb_output_files; i++) {
1549 OutputFile *of = output_files[i];
1550 uint64_t total_packets = 0, total_size = 0;
1552 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1553 i, of->ctx->filename);
1555 for (j = 0; j < of->ctx->nb_streams; j++) {
1556 OutputStream *ost = output_streams[of->ost_index + j];
1557 enum AVMediaType type = ost->enc_ctx->codec_type;
1559 total_size += ost->data_size;
1560 total_packets += ost->packets_written;
1562 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1563 i, j, media_type_string(type));
1564 if (ost->encoding_needed) {
1565 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1566 ost->frames_encoded);
1567 if (type == AVMEDIA_TYPE_AUDIO)
1568 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1569 av_log(NULL, AV_LOG_VERBOSE, "; ");
1572 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1573 ost->packets_written, ost->data_size);
1575 av_log(NULL, AV_LOG_VERBOSE, "\n");
1578 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1579 total_packets, total_size);
1581 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1582 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1584 av_log(NULL, AV_LOG_WARNING, "\n");
1586 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic one-line status report (frame count, fps, q, size, time,
 * bitrate, speed) to stderr and, in machine-readable key=value form, to the
 * -progress output. Rate-limited to roughly twice per second unless this is
 * the final report, in which case print_final_stats() is also invoked. */
1591 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1594 AVBPrint buf_script;
1596 AVFormatContext *oc;
1598 AVCodecContext *enc;
1599 int frame_number, vid, i;
1602 int64_t pts = INT64_MIN + 1;
1603 static int64_t last_time = -1;
1604 static int qp_histogram[52];
1605 int hours, mins, secs, us;
1609 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: skip intermediate reports issued less than 0.5s apart */
1612 if (!is_last_report) {
1613 if (last_time == -1) {
1614 last_time = cur_time;
1617 if ((cur_time - last_time) < 500000)
1619 last_time = cur_time;
1622 t = (cur_time-timer_start) / 1000000.0;
1625 oc = output_files[0]->ctx;
1627 total_size = avio_size(oc->pb);
1628 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1629 total_size = avio_tell(oc->pb);
1633 av_bprint_init(&buf_script, 0, 1);
1634 for (i = 0; i < nb_output_streams; i++) {
1636 ost = output_streams[i];
1638 if (!ost->stream_copy)
1639 q = ost->quality / (float) FF_QP2LAMBDA;
/* only the first video stream drives frame/fps fields; later video
 * streams just append their quantizer */
1641 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1642 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1643 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1644 ost->file_index, ost->index, q);
1646 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1649 frame_number = ost->frame_number;
1650 fps = t > 1 ? frame_number / t : 0;
1651 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1652 frame_number, fps < 9.95, fps, q);
1653 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1654 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1655 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1656 ost->file_index, ost->index, q);
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram: one hex digit per bucket, log2-compressed counts */
1662 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1664 for (j = 0; j < 32; j++)
1665 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1668 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1670 double error, error_sum = 0;
1671 double scale, scale_sum = 0;
1673 char type[3] = { 'Y','U','V' };
1674 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1675 for (j = 0; j < 3; j++) {
1676 if (is_last_report) {
1677 error = enc->error[j];
1678 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1680 error = ost->error[j];
1681 scale = enc->width * enc->height * 255.0 * 255.0;
1687 p = psnr(error / scale);
1688 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1689 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1690 ost->file_index, ost->index, type[j] | 32, p);
1692 p = psnr(error_sum / scale_sum);
1693 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1694 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1695 ost->file_index, ost->index, p);
1699 /* compute min output value */
1700 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1701 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1702 ost->st->time_base, AV_TIME_BASE_Q));
1704 nb_frames_drop += ost->last_dropped;
1707 secs = FFABS(pts) / AV_TIME_BASE;
1708 us = FFABS(pts) % AV_TIME_BASE;
1714 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1715 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1717 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1719 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1720 "size=%8.0fkB time=", total_size / 1024.0);
1722 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1723 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1724 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1725 (100 * us) / AV_TIME_BASE);
1728 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1729 av_bprintf(&buf_script, "bitrate=N/A\n");
1731 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1732 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1735 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1736 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1737 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1738 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1739 hours, mins, secs, us);
1741 if (nb_frames_dup || nb_frames_drop)
1742 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1743 nb_frames_dup, nb_frames_drop);
1744 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1745 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1749 av_bprintf(&buf_script, "speed=N/A\n");
1751 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1752 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' finalizes */
1755 if (print_stats || is_last_report) {
1756 const char end = is_last_report ? '\n' : '\r';
1757 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1758 fprintf(stderr, "%s %c", buf, end);
1760 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1765 if (progress_avio) {
1766 av_bprintf(&buf_script, "progress=%s\n",
1767 is_last_report ? "end" : "continue");
1768 avio_write(progress_avio, buf_script.str,
1769 FFMIN(buf_script.len, buf_script.size - 1));
1770 avio_flush(progress_avio);
1771 av_bprint_finalize(&buf_script, NULL);
1772 if (is_last_report) {
1773 if ((ret = avio_closep(&progress_avio)) < 0)
1774 av_log(NULL, AV_LOG_ERROR,
1775 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1780 print_final_stats(total_size);
/* Drain every active encoder at end of input: send a NULL frame to signal
 * EOF, then receive and mux all remaining delayed packets until the encoder
 * returns AVERROR_EOF. Streams that never needed encoding, raw-picture video
 * and small-frame audio are skipped. */
1783 static void flush_encoders(void)
1787 for (i = 0; i < nb_output_streams; i++) {
1788 OutputStream *ost = output_streams[i];
1789 AVCodecContext *enc = ost->enc_ctx;
1790 OutputFile *of = output_files[ost->file_index];
1791 int stop_encoding = 0;
1793 if (!ost->encoding_needed)
1796 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1798 #if FF_API_LAVF_FMT_RAWPICTURE
1799 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1803 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame enters drain mode in the send/receive encode API */
1806 avcodec_send_frame(enc, NULL);
1809 const char *desc = NULL;
1811 switch (enc->codec_type) {
1812 case AVMEDIA_TYPE_AUDIO:
1815 case AVMEDIA_TYPE_VIDEO:
1825 av_init_packet(&pkt);
1829 update_benchmark(NULL);
1830 ret = avcodec_receive_packet(enc, &pkt);
1831 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1832 if (ret < 0 && ret != AVERROR_EOF) {
1833 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1838 if (ost->logfile && enc->stats_out) {
1839 fprintf(ost->logfile, "%s", enc->stats_out);
1841 if (ret == AVERROR_EOF) {
1845 if (ost->finished & MUXER_FINISHED) {
1846 av_packet_unref(&pkt);
1849 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1850 pkt_size = pkt.size;
1851 output_packet(of, &pkt, ost);
1852 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1853 do_video_stats(ost, pkt_size);
/*
1864 * Check whether a packet from ist should be written into ost at this time
 * (stream-copy path): the output stream must actually be fed by this input
 * stream, and the input pts must not precede the output file's start time.
 */
1866 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1868 OutputFile *of = output_files[ost->file_index];
1869 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1871 if (ost->source_index != ist_index)
1877 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * timestamps into the output stream time base (offset by the output start
 * time), honor -copy_prior_start / recording-time limits, optionally run the
 * packet through a parser for certain codecs, and mux it. */
1883 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1885 OutputFile *of = output_files[ost->file_index];
1886 InputFile *f = input_files [ist->file_index];
1887 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1888 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1892 av_init_packet(&opkt);
/* don't start a copied stream on a non-keyframe unless explicitly allowed */
1894 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1895 !ost->copy_initial_nonkeyframes)
1898 if (!ost->frame_number && !ost->copy_prior_start) {
1899 int64_t comp_start = start_time;
1900 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1901 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1902 if (pkt->pts == AV_NOPTS_VALUE ?
1903 ist->pts < comp_start :
1904 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1908 if (of->recording_time != INT64_MAX &&
1909 ist->pts >= of->recording_time + start_time) {
1910 close_output_stream(ost);
1914 if (f->recording_time != INT64_MAX) {
1915 start_time = f->ctx->start_time;
1916 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1917 start_time += f->start_time;
1918 if (ist->pts >= f->recording_time + start_time) {
1919 close_output_stream(ost);
1924 /* force the input stream PTS */
1925 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1928 if (pkt->pts != AV_NOPTS_VALUE)
1929 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1931 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the demuxer-tracked dts when the packet carries none */
1933 if (pkt->dts == AV_NOPTS_VALUE)
1934 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1936 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1937 opkt.dts -= ost_tb_start_time;
1939 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1940 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1942 duration = ist->dec_ctx->frame_size;
1943 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1944 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1945 ost->st->time_base) - ost_tb_start_time;
1948 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1949 opkt.flags = pkt->flags;
1950 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1951 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1952 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1953 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1954 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1956 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1957 &opkt.data, &opkt.size,
1958 pkt->data, pkt->size,
1959 pkt->flags & AV_PKT_FLAG_KEY);
1961 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-produced data so the packet owns its buffer */
1966 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1971 opkt.data = pkt->data;
1972 opkt.size = pkt->size;
1974 av_copy_packet_side_data(&opkt, pkt);
1976 #if FF_API_LAVF_FMT_RAWPICTURE
1977 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1978 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1979 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1980 /* store AVPicture in AVPacket, as expected by the output format */
1981 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1983 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1987 opkt.data = (uint8_t *)&pict;
1988 opkt.size = sizeof(AVPicture);
1989 opkt.flags |= AV_PKT_FLAG_KEY;
1993 output_packet(of, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (unless the count exceeds -guess_layout_max) and warn the
 * user. Returns nonzero on success — TODO confirm exact return contract, the
 * tail of the function is outside this view. */
1996 int guess_input_channel_layout(InputStream *ist)
1998 AVCodecContext *dec = ist->dec_ctx;
2000 if (!dec->channel_layout) {
2001 char layout_name[256];
2003 if (dec->channels > ist->guess_layout_max)
2005 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2006 if (!dec->channel_layout)
2008 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2009 dec->channels, dec->channel_layout);
2010 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2011 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on decode
 * errors or on frames the decoder flagged as corrupt. */
2016 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2018 if (*got_output || ret<0)
2019 decode_error_stat[ret<0] ++;
2021 if (ret < 0 && exit_on_error)
2024 if (exit_on_error && *got_output && ist) {
2025 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2026 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
// Thin wrapper over the send/receive decode API.
2032 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2033 // There is the following difference: if you got a frame, you must call
2034 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2035 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2036 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2043 ret = avcodec_send_packet(avctx, pkt);
2044 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2045 // decoded frames with avcodec_receive_frame() until done.
2046 if (ret < 0 && ret != AVERROR_EOF)
2050 ret = avcodec_receive_frame(avctx, frame);
2051 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Decode one audio packet: run the decoder, validate the sample rate, update
 * pts/dts bookkeeping, reconfigure filtergraphs when the stream's sample
 * format/rate/channels change mid-stream, derive a frame pts, and push the
 * decoded frame into every attached filter input. */
2059 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2061 AVFrame *decoded_frame, *f;
2062 AVCodecContext *avctx = ist->dec_ctx;
2063 int i, ret, err = 0, resample_changed;
2064 AVRational decoded_frame_tb;
2066 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2067 return AVERROR(ENOMEM);
2068 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2069 return AVERROR(ENOMEM);
2070 decoded_frame = ist->decoded_frame;
2072 update_benchmark(NULL);
2073 ret = decode(avctx, decoded_frame, got_output, pkt);
2074 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2076 if (ret >= 0 && avctx->sample_rate <= 0) {
2077 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2078 ret = AVERROR_INVALIDDATA;
2081 if (ret != AVERROR_EOF)
2082 check_decode_result(ist, got_output, ret);
2084 if (!*got_output || ret < 0)
2087 ist->samples_decoded += decoded_frame->nb_samples;
2088 ist->frames_decoded++;
2091 /* increment next_dts to use for the case where the input stream does not
2092 have timestamps or there are multiple frames in the packet */
2093 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2095 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect mid-stream parameter changes that require filter reconfiguration */
2099 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2100 ist->resample_channels != avctx->channels ||
2101 ist->resample_channel_layout != decoded_frame->channel_layout ||
2102 ist->resample_sample_rate != decoded_frame->sample_rate;
2103 if (resample_changed) {
2104 char layout1[64], layout2[64];
2106 if (!guess_input_channel_layout(ist)) {
2107 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2108 "layout for Input Stream #%d.%d\n", ist->file_index,
2112 decoded_frame->channel_layout = avctx->channel_layout;
2114 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2115 ist->resample_channel_layout);
2116 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2117 decoded_frame->channel_layout);
2119 av_log(NULL, AV_LOG_INFO,
2120 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2121 ist->file_index, ist->st->index,
2122 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2123 ist->resample_channels, layout1,
2124 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2125 avctx->channels, layout2);
2127 ist->resample_sample_fmt = decoded_frame->format;
2128 ist->resample_sample_rate = decoded_frame->sample_rate;
2129 ist->resample_channel_layout = decoded_frame->channel_layout;
2130 ist->resample_channels = avctx->channels;
2132 for (i = 0; i < nb_filtergraphs; i++)
2133 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2134 FilterGraph *fg = filtergraphs[i];
2135 if (configure_filtergraph(fg) < 0) {
2136 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* choose a pts source: frame pts, packet pts, or the demuxer dts estimate */
2142 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2143 decoded_frame_tb = ist->st->time_base;
2144 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2145 decoded_frame->pts = pkt->pts;
2146 decoded_frame_tb = ist->st->time_base;
2148 decoded_frame->pts = ist->dts;
2149 decoded_frame_tb = AV_TIME_BASE_Q;
2151 if (decoded_frame->pts != AV_NOPTS_VALUE)
2152 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2153 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2154 (AVRational){1, avctx->sample_rate});
2155 ist->nb_samples = decoded_frame->nb_samples;
/* all filters but the last get a reference; the last consumes the frame */
2156 for (i = 0; i < ist->nb_filters; i++) {
2157 if (i < ist->nb_filters - 1) {
2158 f = ist->filter_frame;
2159 err = av_frame_ref(f, decoded_frame);
2164 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2165 AV_BUFFERSRC_FLAG_PUSH);
2166 if (err == AVERROR_EOF)
2167 err = 0; /* ignore */
2171 decoded_frame->pts = AV_NOPTS_VALUE;
2173 av_frame_unref(ist->filter_frame);
2174 av_frame_unref(decoded_frame);
2175 return err < 0 ? err : ret;
/* Decode one video packet: buffer demuxer dts values for EOF timestamp
 * recovery, run the decoder, pick a best-effort timestamp, reconfigure
 * filtergraphs on mid-stream size/format changes, and push the decoded frame
 * into every attached filter input. */
2178 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2180 AVFrame *decoded_frame, *f;
2181 int i, ret = 0, err = 0, resample_changed;
2182 int64_t best_effort_timestamp;
2183 int64_t dts = AV_NOPTS_VALUE;
2184 AVRational *frame_sample_aspect;
2187 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2188 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2190 if (!eof && pkt && pkt->size == 0)
2193 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2194 return AVERROR(ENOMEM);
2195 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2196 return AVERROR(ENOMEM);
2197 decoded_frame = ist->decoded_frame;
2198 if (ist->dts != AV_NOPTS_VALUE)
2199 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2202 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2205 // The old code used to set dts on the drain packet, which does not work
2206 // with the new API anymore.
/* queue this dts so it can stand in for missing timestamps during drain */
2208 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2210 return AVERROR(ENOMEM);
2211 ist->dts_buffer = new;
2212 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2215 update_benchmark(NULL);
2216 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2217 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2219 // The following line may be required in some cases where there is no parser
2220 // or the parser does not has_b_frames correctly
2221 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2222 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2223 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2225 av_log(ist->dec_ctx, AV_LOG_WARNING,
2226 "video_delay is larger in decoder than demuxer %d > %d.\n"
2227 "If you want to help, upload a sample "
2228 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2229 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2230 ist->dec_ctx->has_b_frames,
2231 ist->st->codecpar->video_delay);
2234 if (ret != AVERROR_EOF)
2235 check_decode_result(ist, got_output, ret);
2237 if (*got_output && ret >= 0) {
2238 if (ist->dec_ctx->width != decoded_frame->width ||
2239 ist->dec_ctx->height != decoded_frame->height ||
2240 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2241 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2242 decoded_frame->width,
2243 decoded_frame->height,
2244 decoded_frame->format,
2245 ist->dec_ctx->width,
2246 ist->dec_ctx->height,
2247 ist->dec_ctx->pix_fmt);
2251 if (!*got_output || ret < 0)
2254 if(ist->top_field_first>=0)
2255 decoded_frame->top_field_first = ist->top_field_first;
2257 ist->frames_decoded++;
/* download hw-decoded surfaces to system memory for sw filtering */
2259 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2260 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2264 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2266 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* during drain, fall back to the oldest buffered demuxer dts */
2268 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2269 best_effort_timestamp = ist->dts_buffer[0];
2271 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2272 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2273 ist->nb_dts_buffer--;
2276 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2277 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2279 if (ts != AV_NOPTS_VALUE)
2280 ist->next_pts = ist->pts = ts;
2284 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2285 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2286 ist->st->index, av_ts2str(decoded_frame->pts),
2287 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2288 best_effort_timestamp,
2289 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2290 decoded_frame->key_frame, decoded_frame->pict_type,
2291 ist->st->time_base.num, ist->st->time_base.den);
2294 if (ist->st->sample_aspect_ratio.num)
2295 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* reconfigure filters when the stream's size or pixel format changes */
2297 resample_changed = ist->resample_width != decoded_frame->width ||
2298 ist->resample_height != decoded_frame->height ||
2299 ist->resample_pix_fmt != decoded_frame->format;
2300 if (resample_changed) {
2301 av_log(NULL, AV_LOG_INFO,
2302 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2303 ist->file_index, ist->st->index,
2304 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2305 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2307 ist->resample_width = decoded_frame->width;
2308 ist->resample_height = decoded_frame->height;
2309 ist->resample_pix_fmt = decoded_frame->format;
2311 for (i = 0; i < nb_filtergraphs; i++) {
2312 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2313 configure_filtergraph(filtergraphs[i]) < 0) {
2314 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2320 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* all filters but the last get a reference; the last consumes the frame */
2321 for (i = 0; i < ist->nb_filters; i++) {
2322 if (!frame_sample_aspect->num)
2323 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2325 if (i < ist->nb_filters - 1) {
2326 f = ist->filter_frame;
2327 err = av_frame_ref(f, decoded_frame);
2332 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2333 if (err == AVERROR_EOF) {
2334 err = 0; /* ignore */
2335 } else if (err < 0) {
2336 av_log(NULL, AV_LOG_FATAL,
2337 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2343 av_frame_unref(ist->filter_frame);
2344 av_frame_unref(decoded_frame);
2345 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * display duration (-fix_sub_duration works one event behind), feed the
 * result to sub2video, and re-encode it on every matching subtitle output
 * stream. */
2348 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2350 AVSubtitle subtitle;
2351 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2352 &subtitle, got_output, pkt);
2354 check_decode_result(NULL, got_output, ret);
2356 if (ret < 0 || !*got_output) {
2358 sub2video_flush(ist);
2362 if (ist->fix_sub_duration) {
2364 if (ist->prev_sub.got_output) {
/* limit the previous subtitle's end time to this one's start (ms) */
2365 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2366 1000, AV_TIME_BASE);
2367 if (end < ist->prev_sub.subtitle.end_display_time) {
2368 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2369 "Subtitle duration reduced from %d to %d%s\n",
2370 ist->prev_sub.subtitle.end_display_time, end,
2371 end <= 0 ? ", dropping it" : "");
2372 ist->prev_sub.subtitle.end_display_time = end;
/* swap current and previous event: output lags input by one subtitle */
2375 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2376 FFSWAP(int, ret, ist->prev_sub.ret);
2377 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2385 sub2video_update(ist, &subtitle);
2387 if (!subtitle.num_rects)
2390 ist->frames_decoded++;
2392 for (i = 0; i < nb_output_streams; i++) {
2393 OutputStream *ost = output_streams[i];
2395 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2396 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2399 do_subtitle_out(output_files[ost->file_index], ost, ist, &subtitle);
2403 avsubtitle_free(&subtitle);
2407 static int send_filter_eof(InputStream *ist)
2410 for (i = 0; i < ist->nb_filters; i++) {
2411 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2418 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher for an input stream: maintains the stream's
 * dts/pts estimates, runs the appropriate decoder (audio/video/subtitle)
 * when decoding is needed, sends filter EOF on drain, and performs the
 * stream-copy path otherwise. Returns nonzero while more input is expected.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
2419 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2423 int eof_reached = 0;
/* first packet for this stream: seed dts from B-frame delay / frame rate */
2426 if (!ist->saw_first_ts) {
2427 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2429 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2430 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2431 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2433 ist->saw_first_ts = 1;
2436 if (ist->next_dts == AV_NOPTS_VALUE)
2437 ist->next_dts = ist->dts;
2438 if (ist->next_pts == AV_NOPTS_VALUE)
2439 ist->next_pts = ist->pts;
2443 av_init_packet(&avpkt);
/* packet dts (rescaled to AV_TIME_BASE) is the best dts estimate */
2450 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2451 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2452 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2453 ist->next_pts = ist->pts = ist->dts;
2456 // while we have more to decode or while the decoder did output something on EOF
2457 while (ist->decoding_needed) {
2461 ist->pts = ist->next_pts;
2462 ist->dts = ist->next_dts;
2464 switch (ist->dec_ctx->codec_type) {
2465 case AVMEDIA_TYPE_AUDIO:
2466 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2468 case AVMEDIA_TYPE_VIDEO:
2469 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* estimate next frame duration: packet duration first, then the codec
 * frame rate adjusted for repeated fields via the parser */
2470 if (!repeating || !pkt || got_output) {
2471 if (pkt && pkt->duration) {
2472 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2473 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2474 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2475 duration = ((int64_t)AV_TIME_BASE *
2476 ist->dec_ctx->framerate.den * ticks) /
2477 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2480 if(ist->dts != AV_NOPTS_VALUE && duration) {
2481 ist->next_dts += duration;
2483 ist->next_dts = AV_NOPTS_VALUE;
2487 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2489 case AVMEDIA_TYPE_SUBTITLE:
2492 ret = transcode_subtitles(ist, &avpkt, &got_output);
2493 if (!pkt && ret >= 0)
2500 if (ret == AVERROR_EOF) {
2506 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2507 ist->file_index, ist->st->index, av_err2str(ret));
2510 // Decoding might not terminate if we're draining the decoder, and
2511 // the decoder keeps returning an error.
2512 // This should probably be considered a libavcodec issue.
2513 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2522 // During draining, we might get multiple output frames in this loop.
2523 // ffmpeg.c does not drain the filter chain on configuration changes,
2524 // which means if we send multiple frames at once to the filters, and
2525 // one of those frames changes configuration, the buffered frames will
2526 // be lost. This can upset certain FATE tests.
2527 // Decode only 1 frame per call on EOF to appease these FATE tests.
2528 // The ideal solution would be to rewrite decoding to use the new
2529 // decoding API in a better way.
2536 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2537 /* except when looping we need to flush but not to send an EOF */
2538 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2539 int ret = send_filter_eof(ist);
2541 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2546 /* handle stream copy */
/* no decoding: advance dts analytically from codec parameters */
2547 if (!ist->decoding_needed) {
2548 ist->dts = ist->next_dts;
2549 switch (ist->dec_ctx->codec_type) {
2550 case AVMEDIA_TYPE_AUDIO:
2551 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2552 ist->dec_ctx->sample_rate;
2554 case AVMEDIA_TYPE_VIDEO:
2555 if (ist->framerate.num) {
2556 // TODO: Remove work-around for c99-to-c89 issue 7
2557 AVRational time_base_q = AV_TIME_BASE_Q;
/* round dts to the forced-framerate grid, then step one frame */
2558 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2559 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2560 } else if (pkt->duration) {
2561 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2562 } else if(ist->dec_ctx->framerate.num != 0) {
2563 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2564 ist->next_dts += ((int64_t)AV_TIME_BASE *
2565 ist->dec_ctx->framerate.den * ticks) /
2566 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2570 ist->pts = ist->dts;
2571 ist->next_pts = ist->next_dts;
/* forward the packet untouched to every stream-copy output it maps to */
2573 for (i = 0; pkt && i < nb_output_streams; i++) {
2574 OutputStream *ost = output_streams[i];
2576 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2579 do_streamcopy(ist, ost, pkt);
2582 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to -sdp_file. Waits until every output
 * file has written its header.
 * NOTE(review): elided chunk — error-path lines are missing below.
 */
2585 static void print_sdp(void)
2590 AVIOContext *sdp_pb;
2591 AVFormatContext **avc;
/* all headers must be written before the SDP can be generated */
2593 for (i = 0; i < nb_output_files; i++) {
2594 if (!output_files[i]->header_written)
2598 avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP muxer contexts; j counts them */
2601 for (i = 0, j = 0; i < nb_output_files; i++) {
2602 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2603 avc[j] = output_files[i]->ctx;
2611 av_sdp_create(avc, j, sdp, sizeof(sdp));
2613 if (!sdp_filename) {
2614 printf("SDP:\n%s\n", sdp);
2617 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2618 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2620 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2621 avio_closep(&sdp_pb);
2622 av_freep(&sdp_filename);
2630 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2633 for (i = 0; hwaccels[i].name; i++)
2634 if (hwaccels[i].pix_fmt == pix_fmt)
2635 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first one we can use — initializing a hwaccel when
 * the format is hardware-backed and matches the user's -hwaccel choice.
 * NOTE(review): elided chunk — fallback/termination lines are missing below.
 */
2639 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2641 InputStream *ist = s->opaque;
2642 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
2645 for (p = pix_fmts; *p != -1; p++) {
2646 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2647 const HWAccel *hwaccel;
/* software formats need no setup; they are acceptable as-is */
2649 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2652 hwaccel = get_hwaccel(*p);
/* skip formats whose hwaccel conflicts with the active/requested one */
2654 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2655 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2658 ret = hwaccel->init(s);
/* init failure is fatal only if this hwaccel was explicitly requested */
2660 if (ist->hwaccel_id == hwaccel->id) {
2661 av_log(NULL, AV_LOG_FATAL,
2662 "%s hwaccel requested for input stream #%d:%d, "
2663 "but cannot be initialized.\n", hwaccel->name,
2664 ist->file_index, ist->st->index);
2665 return AV_PIX_FMT_NONE;
2670 if (ist->hw_frames_ctx) {
2671 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2672 if (!s->hw_frames_ctx)
2673 return AV_PIX_FMT_NONE;
2676 ist->active_hwaccel_id = hwaccel->id;
2677 ist->hwaccel_pix_fmt = *p;
2684 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2686 InputStream *ist = s->opaque;
2688 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2689 return ist->hwaccel_get_buffer(s, frame, flags);
2691 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed) and
 * reset its timestamp bookkeeping. On failure, writes a human-readable
 * message into 'error' and returns a negative AVERROR code.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
2694 static int init_input_stream(int ist_index, char *error, int error_len)
2697 InputStream *ist = input_streams[ist_index];
2699 if (ist->decoding_needed) {
2700 AVCodec *codec = ist->dec;
2702 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2703 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2704 return AVERROR(EINVAL);
/* install our callbacks: hwaccel format negotiation + buffer allocation */
2707 ist->dec_ctx->opaque = ist;
2708 ist->dec_ctx->get_format = get_format;
2709 ist->dec_ctx->get_buffer2 = get_buffer;
2710 ist->dec_ctx->thread_safe_callbacks = 1;
2712 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles need end-display-time computation when they are encoded */
2713 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2714 (ist->decoding_needed & DECODING_FOR_OST)) {
2715 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2716 if (ist->decoding_needed & DECODING_FOR_FILTER)
2717 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2720 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2722 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2723 * audio, and video decoders such as cuvid or mediacodec */
2724 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2726 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2727 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2728 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2729 if (ret == AVERROR_EXPERIMENTAL)
2730 abort_codec_experimental(codec, 0);
2732 snprintf(error, error_len,
2733 "Error while opening decoder for input stream "
2735 ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder option left in the dict was not consumed — report it */
2738 assert_avoptions(ist->decoder_opts);
2741 ist->next_pts = AV_NOPTS_VALUE;
2742 ist->next_dts = AV_NOPTS_VALUE;
2747 static InputStream *get_input_stream(OutputStream *ost)
2749 if (ost->source_index >= 0)
2750 return input_streams[ost->source_index];
/*
 * qsort comparator ordering int64_t values ascending.
 * Returns -1, 0 or +1; computed as (a>b)-(a<b) to avoid the signed
 * overflow a plain subtraction could cause.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2759 /* open the muxer when all the streams are initialized */
/*
 * Write the output file header once every stream of the file is
 * initialized, then print/write the SDP if requested and flush all
 * packets that were queued while the muxer was not yet open.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
2760 static int check_init_output_file(OutputFile *of, int file_index)
/* bail out until every stream of this file is ready */
2764 for (i = 0; i < of->ctx->nb_streams; i++) {
2765 OutputStream *ost = output_streams[of->ost_index + i];
2766 if (!ost->initialized)
2770 of->ctx->interrupt_callback = int_cb;
2772 ret = avformat_write_header(of->ctx, &of->opts);
2774 av_log(NULL, AV_LOG_ERROR,
2775 "Could not write header for output file #%d "
2776 "(incorrect codec parameters ?): %s",
2777 file_index, av_err2str(ret));
2780 //assert_avoptions(of->opts);
2781 of->header_written = 1;
2783 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2785 if (sdp_filename || want_sdp)
2788 /* flush the muxing queues */
/* drain packets buffered before the header could be written */
2789 for (i = 0; i < of->ctx->nb_streams; i++) {
2790 OutputStream *ost = output_streams[of->ost_index + i];
2792 while (av_fifo_size(ost->muxing_queue)) {
2794 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2795 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, then copy
 * the final parameters/time base back onto the stream.
 * NOTE(review): elided chunk — error-return lines are missing below.
 */
2802 static int init_output_bsfs(OutputStream *ost)
2807 if (!ost->nb_bitstream_filters)
2810 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2811 ctx = ost->bsf_ctx[i];
/* each filter's input comes from the previous filter's output
 * (or from the stream itself for the first filter) */
2813 ret = avcodec_parameters_copy(ctx->par_in,
2814 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2818 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2820 ret = av_bsf_init(ctx);
2822 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2823 ost->bsf_ctx[i]->filter->name);
/* the last filter in the chain defines what the muxer will see */
2828 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2829 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2833 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: duplicate the input
 * stream's codec parameters (id, tag, extradata, per-media fields), copy
 * timing information and stream side data, and set up a parser context.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
2838 static int init_output_stream_streamcopy(OutputStream *ost)
2840 OutputFile *of = output_files[ost->file_index];
2841 InputStream *ist = get_input_stream(ost);
2842 AVCodecParameters *par_dst = ost->st->codecpar;
2843 AVCodecParameters *par_src = ost->ref_par;
2846 uint64_t extra_size;
2848 av_assert0(ist && !ost->filter);
/* round-trip input params through enc_ctx so user -c:copy options apply */
2850 avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2851 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2853 av_log(NULL, AV_LOG_FATAL,
2854 "Error setting up codec context options.\n");
2857 avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* 64-bit sum guards against extradata_size + padding overflowing int */
2859 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2861 if (extra_size > INT_MAX) {
2862 return AVERROR(EINVAL);
2865 /* if stream_copy is selected, no need to decode or encode */
2866 par_dst->codec_id = par_src->codec_id;
2867 par_dst->codec_type = par_src->codec_type;
/* keep the source codec tag only when the output format accepts it */
2869 if (!par_dst->codec_tag) {
2870 unsigned int codec_tag;
2871 if (!of->ctx->oformat->codec_tag ||
2872 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2873 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2874 par_dst->codec_tag = par_src->codec_tag;
2877 par_dst->bit_rate = par_src->bit_rate;
2878 par_dst->field_order = par_src->field_order;
2879 par_dst->chroma_location = par_src->chroma_location;
2881 if (par_src->extradata_size) {
2882 par_dst->extradata = av_mallocz(extra_size);
2883 if (!par_dst->extradata) {
2884 return AVERROR(ENOMEM);
2886 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2887 par_dst->extradata_size = par_src->extradata_size;
2889 par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2890 par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2892 if (!ost->frame_rate.num)
2893 ost->frame_rate = ist->framerate;
2894 ost->st->avg_frame_rate = ost->frame_rate;
2896 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2900 // copy timebase while removing common factors
/* av_add_q with 0/1 reduces the rational to lowest terms */
2901 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
/* duplicate stream-level side data, except a display matrix that the
 * user overrode with -metadata rotate */
2903 if (ist->st->nb_side_data) {
2904 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2905 sizeof(*ist->st->side_data));
2906 if (!ost->st->side_data)
2907 return AVERROR(ENOMEM);
2909 ost->st->nb_side_data = 0;
2910 for (i = 0; i < ist->st->nb_side_data; i++) {
2911 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2912 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2914 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2917 sd_dst->data = av_malloc(sd_src->size);
2919 return AVERROR(ENOMEM);
2920 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2921 sd_dst->size = sd_src->size;
2922 sd_dst->type = sd_src->type;
2923 ost->st->nb_side_data++;
2927 ost->parser = av_parser_init(par_dst->codec_id);
2928 ost->parser_avctx = avcodec_alloc_context3(NULL);
2929 if (!ost->parser_avctx)
2930 return AVERROR(ENOMEM);
/* per-media-type parameter copies */
2932 switch (par_dst->codec_type) {
2933 case AVMEDIA_TYPE_AUDIO:
2934 if (audio_volume != 256) {
2935 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2938 par_dst->channel_layout = par_src->channel_layout;
2939 par_dst->sample_rate = par_src->sample_rate;
2940 par_dst->channels = par_src->channels;
2941 par_dst->frame_size = par_src->frame_size;
2942 par_dst->block_align = par_src->block_align;
2943 par_dst->initial_padding = par_src->initial_padding;
2944 par_dst->trailing_padding = par_src->trailing_padding;
2945 par_dst->profile = par_src->profile;
/* MP3/AC-3: these block_align values are bogus for muxing — clear them */
2946 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2947 par_dst->block_align= 0;
2948 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2949 par_dst->block_align= 0;
2951 case AVMEDIA_TYPE_VIDEO:
2952 par_dst->format = par_src->format;
2953 par_dst->color_space = par_src->color_space;
2954 par_dst->color_range = par_src->color_range;
2955 par_dst->color_primaries = par_src->color_primaries;
2956 par_dst->color_trc = par_src->color_trc;
2957 par_dst->width = par_src->width;
2958 par_dst->height = par_src->height;
2959 par_dst->video_delay = par_src->video_delay;
2960 par_dst->profile = par_src->profile;
/* SAR priority: -aspect option > input stream SAR > codec-level SAR */
2961 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2963 av_mul_q(ost->frame_aspect_ratio,
2964 (AVRational){ par_dst->height, par_dst->width });
2965 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2966 "with stream copy may produce invalid files\n");
2968 else if (ist->st->sample_aspect_ratio.num)
2969 sar = ist->st->sample_aspect_ratio;
2971 sar = par_src->sample_aspect_ratio;
2972 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2973 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2974 ost->st->r_frame_rate = ist->st->r_frame_rate;
2976 case AVMEDIA_TYPE_SUBTITLE:
2977 par_dst->width = par_src->width;
2978 par_dst->height = par_src->height;
2980 case AVMEDIA_TYPE_UNKNOWN:
2981 case AVMEDIA_TYPE_DATA:
2982 case AVMEDIA_TYPE_ATTACHMENT:
/*
 * Fully initialize one output stream: open the encoder (or run the
 * stream-copy setup), propagate parameters to the AVStream, initialize
 * bitstream filters, and trigger header writing once the whole output
 * file is ready. On failure fills 'error' and returns negative AVERROR.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
2991 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2995 if (ost->encoding_needed) {
2996 AVCodec *codec = ost->enc;
2997 AVCodecContext *dec = NULL;
3000 if ((ist = get_input_stream(ost)))
/* carry the decoder's ASS subtitle header over to the encoder;
 * +1 byte because the ASS code expects NUL termination */
3002 if (dec && dec->subtitle_header) {
3003 /* ASS code assumes this buffer is null terminated so add extra byte. */
3004 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3005 if (!ost->enc_ctx->subtitle_header)
3006 return AVERROR(ENOMEM);
3007 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3008 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3010 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3011 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* default audio bitrate of 128k when the user set none */
3012 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3014 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3015 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3016 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* hand the filtergraph's hw frames context to a hw-capable encoder */
3018 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3019 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3020 if (!ost->enc_ctx->hw_frames_ctx)
3021 return AVERROR(ENOMEM);
3024 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3025 if (ret == AVERROR_EXPERIMENTAL)
3026 abort_codec_experimental(codec, 1);
3027 snprintf(error, error_len,
3028 "Error while opening encoder for output stream #%d:%d - "
3029 "maybe incorrect parameters such as bit_rate, rate, width or height",
3030 ost->file_index, ost->index);
/* fixed-frame-size audio codecs need the buffersink to emit exactly
 * frame_size samples per frame */
3033 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3034 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3035 av_buffersink_set_frame_size(ost->filter->filter,
3036 ost->enc_ctx->frame_size);
3037 assert_avoptions(ost->encoder_opts);
3038 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3039 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3040 " It takes bits/s as argument, not kbits/s\n");
3042 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3044 av_log(NULL, AV_LOG_FATAL,
3045 "Error initializing the output stream codec context.\n");
3049 * FIXME: ost->st->codec should't be needed here anymore.
3051 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* copy encoder-produced side data onto the stream */
3055 if (ost->enc_ctx->nb_coded_side_data) {
3058 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3059 sizeof(*ost->st->side_data));
3060 if (!ost->st->side_data)
3061 return AVERROR(ENOMEM);
3063 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3064 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3065 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3067 sd_dst->data = av_malloc(sd_src->size);
3069 return AVERROR(ENOMEM);
3070 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3071 sd_dst->size = sd_src->size;
3072 sd_dst->type = sd_src->type;
3073 ost->st->nb_side_data++;
3077 // copy timebase while removing common factors
3078 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3079 ost->st->codec->codec= ost->enc_ctx->codec;
3080 } else if (ost->stream_copy) {
3081 ret = init_output_stream_streamcopy(ost);
3086 * FIXME: will the codec context used by the parser during streamcopy
3087 * This should go away with the new parser API.
3089 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3094 /* initialize bitstream filters for the output stream
3095 * needs to be done here, because the codec id for streamcopy is not
3096 * known until now */
3097 ret = init_output_bsfs(ost);
3101 ost->initialized = 1;
/* may write the muxer header if this was the last pending stream */
3103 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/*
 * Parse the -force_key_frames argument (comma-separated times, or the
 * "chapters[+offset]" shorthand) into a sorted array of pts values in
 * the encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
3110 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3111                                     AVCodecContext *avctx)
3114 int n = 1, i, size, index = 0;
/* first pass: count entries (n = commas + 1) */
3117 for (p = kf; *p; p++)
3121 pts = av_malloc_array(size, sizeof(*pts));
3123 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3128 for (i = 0; i < n; i++) {
3129 char *next = strchr(p, ',');
/* "chapters[+offset]": one keyframe per chapter start, plus offset */
3134 if (!memcmp(p, "chapters", 8)) {
3136 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* grow the array by (nb_chapters - 1): the entry itself is replaced */
3139 if (avf->nb_chapters > INT_MAX - size ||
3140 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3142 av_log(NULL, AV_LOG_FATAL,
3143 "Could not allocate forced key frames array.\n");
3146 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3147 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3149 for (j = 0; j < avf->nb_chapters; j++) {
3150 AVChapter *c = avf->chapters[j];
3151 av_assert1(index < size);
3152 pts[index++] = av_rescale_q(c->start, c->time_base,
3153 avctx->time_base) + t;
/* plain timestamp entry */
3158 t = parse_time_or_die("force_key_frames", p, 1);
3159 av_assert1(index < size);
3160 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3167 av_assert0(index == size);
/* the encoder expects the forced-keyframe list sorted ascending */
3168 qsort(pts, size, sizeof(*pts), compare_int64);
3169 ost->forced_kf_count = size;
3170 ost->forced_kf_pts = pts;
3173 static void report_new_stream(int input_index, AVPacket *pkt)
3175 InputFile *file = input_files[input_index];
3176 AVStream *st = file->ctx->streams[pkt->stream_index];
3178 if (pkt->stream_index < file->nb_streams_warn)
3180 av_log(file->ctx, AV_LOG_WARNING,
3181 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3182 av_get_media_type_string(st->codecpar->codec_type),
3183 input_index, pkt->stream_index,
3184 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3185 file->nb_streams_warn = pkt->stream_index + 1;
/*
 * Set the stream's "encoder" metadata tag to identify the libavcodec
 * build and encoder name used — unless the user already set one, or
 * bitexact mode (format or codec level) requests reproducible output.
 * NOTE(review): elided chunk — some original lines are missing below.
 */
3188 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3190 AVDictionaryEntry *e;
3192 uint8_t *encoder_string;
3193 int encoder_string_len;
3194 int format_flags = 0;
3195 int codec_flags = 0;
/* respect an explicit user-provided encoder tag */
3197 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags / -flags option strings to detect +bitexact */
3200 e = av_dict_get(of->opts, "fflags", NULL, 0);
3202 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3205 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3207 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3209 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3212 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* "Lavc<version> " or bare "Lavc " + encoder name; +2 covers space + NUL */
3215 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3216 encoder_string = av_mallocz(encoder_string_len);
3217 if (!encoder_string)
3220 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3221 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3223 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3224 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dict takes ownership of encoder_string */
3225 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3226 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3229 static int transcode_init(void)
3231 int ret = 0, i, j, k;
3232 AVFormatContext *oc;
3235 char error[1024] = {0};
3237 for (i = 0; i < nb_filtergraphs; i++) {
3238 FilterGraph *fg = filtergraphs[i];
3239 for (j = 0; j < fg->nb_outputs; j++) {
3240 OutputFilter *ofilter = fg->outputs[j];
3241 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3243 if (fg->nb_inputs != 1)
3245 for (k = nb_input_streams-1; k >= 0 ; k--)
3246 if (fg->inputs[0]->ist == input_streams[k])
3248 ofilter->ost->source_index = k;
3252 /* init framerate emulation */
3253 for (i = 0; i < nb_input_files; i++) {
3254 InputFile *ifile = input_files[i];
3255 if (ifile->rate_emu)
3256 for (j = 0; j < ifile->nb_streams; j++)
3257 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3260 /* for each output stream, we compute the right encoding parameters */
3261 for (i = 0; i < nb_output_streams; i++) {
3262 ost = output_streams[i];
3263 oc = output_files[ost->file_index]->ctx;
3264 ist = get_input_stream(ost);
3266 if (ost->attachment_filename)
3270 ost->st->disposition = ist->st->disposition;
3272 for (j=0; j<oc->nb_streams; j++) {
3273 AVStream *st = oc->streams[j];
3274 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3277 if (j == oc->nb_streams)
3278 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3279 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3280 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3283 if (!ost->stream_copy) {
3284 AVCodecContext *enc_ctx = ost->enc_ctx;
3285 AVCodecContext *dec_ctx = NULL;
3287 set_encoder_id(output_files[ost->file_index], ost);
3290 dec_ctx = ist->dec_ctx;
3292 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3296 if (qsv_transcode_init(ost))
3301 if (cuvid_transcode_init(ost))
3305 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3306 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3307 filtergraph_is_simple(ost->filter->graph)) {
3308 FilterGraph *fg = ost->filter->graph;
3309 if (configure_filtergraph(fg)) {
3310 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3315 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3316 if (!ost->frame_rate.num)
3317 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3318 if (ist && !ost->frame_rate.num)
3319 ost->frame_rate = ist->framerate;
3320 if (ist && !ost->frame_rate.num)
3321 ost->frame_rate = ist->st->r_frame_rate;
3322 if (ist && !ost->frame_rate.num) {
3323 ost->frame_rate = (AVRational){25, 1};
3324 av_log(NULL, AV_LOG_WARNING,
3326 "about the input framerate is available. Falling "
3327 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3328 "if you want a different framerate.\n",
3329 ost->file_index, ost->index);
3331 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3332 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3333 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3334 ost->frame_rate = ost->enc->supported_framerates[idx];
3336 // reduce frame rate for mpeg4 to be within the spec limits
3337 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3338 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3339 ost->frame_rate.num, ost->frame_rate.den, 65535);
3343 switch (enc_ctx->codec_type) {
3344 case AVMEDIA_TYPE_AUDIO:
3345 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3347 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3348 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3349 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3350 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3351 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3352 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3354 case AVMEDIA_TYPE_VIDEO:
3355 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3356 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3357 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3358 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3359 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3360 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3361 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3363 for (j = 0; j < ost->forced_kf_count; j++)
3364 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3366 enc_ctx->time_base);
3368 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3369 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3370 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3371 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3372 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3373 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3374 if (!strncmp(ost->enc->name, "libx264", 7) &&
3375 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3376 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3377 av_log(NULL, AV_LOG_WARNING,
3378 "No pixel format specified, %s for H.264 encoding chosen.\n"
3379 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3380 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3381 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3382 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3383 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3384 av_log(NULL, AV_LOG_WARNING,
3385 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3386 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3387 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3388 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3390 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3391 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3393 ost->st->avg_frame_rate = ost->frame_rate;
3396 enc_ctx->width != dec_ctx->width ||
3397 enc_ctx->height != dec_ctx->height ||
3398 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3399 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3402 if (ost->forced_keyframes) {
3403 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3404 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3405 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3407 av_log(NULL, AV_LOG_ERROR,
3408 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3411 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3412 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3413 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3414 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3416 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3417 // parse it only for static kf timings
3418 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3419 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3423 case AVMEDIA_TYPE_SUBTITLE:
3424 enc_ctx->time_base = (AVRational){1, 1000};
3425 if (!enc_ctx->width) {
3426 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3427 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3430 case AVMEDIA_TYPE_DATA:
3438 if (ost->disposition) {
3439 static const AVOption opts[] = {
3440 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3441 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3442 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3443 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3444 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3445 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3446 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3447 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3448 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3449 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3450 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3451 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3452 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3453 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3456 static const AVClass class = {
3458 .item_name = av_default_item_name,
3460 .version = LIBAVUTIL_VERSION_INT,
3462 const AVClass *pclass = &class;
3464 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3470 /* init input streams */
3471 for (i = 0; i < nb_input_streams; i++)
3472 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3473 for (i = 0; i < nb_output_streams; i++) {
3474 ost = output_streams[i];
3475 avcodec_close(ost->enc_ctx);
3480 /* open each encoder */
3481 for (i = 0; i < nb_output_streams; i++) {
3482 ret = init_output_stream(output_streams[i], error, sizeof(error));
3487 /* discard unused programs */
3488 for (i = 0; i < nb_input_files; i++) {
3489 InputFile *ifile = input_files[i];
3490 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3491 AVProgram *p = ifile->ctx->programs[j];
3492 int discard = AVDISCARD_ALL;
3494 for (k = 0; k < p->nb_stream_indexes; k++)
3495 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3496 discard = AVDISCARD_DEFAULT;
3499 p->discard = discard;
3503 /* write headers for files with no streams */
3504 for (i = 0; i < nb_output_files; i++) {
3505 oc = output_files[i]->ctx;
3506 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3507 ret = check_init_output_file(output_files[i], i);
3514 /* dump the stream mapping */
3515 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3516 for (i = 0; i < nb_input_streams; i++) {
3517 ist = input_streams[i];
3519 for (j = 0; j < ist->nb_filters; j++) {
3520 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3521 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3522 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3523 ist->filters[j]->name);
3524 if (nb_filtergraphs > 1)
3525 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3526 av_log(NULL, AV_LOG_INFO, "\n");
3531 for (i = 0; i < nb_output_streams; i++) {
3532 ost = output_streams[i];
3534 if (ost->attachment_filename) {
3535 /* an attached file */
3536 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3537 ost->attachment_filename, ost->file_index, ost->index);
3541 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3542 /* output from a complex graph */
3543 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3544 if (nb_filtergraphs > 1)
3545 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3547 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3548 ost->index, ost->enc ? ost->enc->name : "?");
3552 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3553 input_streams[ost->source_index]->file_index,
3554 input_streams[ost->source_index]->st->index,
3557 if (ost->sync_ist != input_streams[ost->source_index])
3558 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3559 ost->sync_ist->file_index,
3560 ost->sync_ist->st->index);
3561 if (ost->stream_copy)
3562 av_log(NULL, AV_LOG_INFO, " (copy)");
3564 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3565 const AVCodec *out_codec = ost->enc;
3566 const char *decoder_name = "?";
3567 const char *in_codec_name = "?";
3568 const char *encoder_name = "?";
3569 const char *out_codec_name = "?";
3570 const AVCodecDescriptor *desc;
3573 decoder_name = in_codec->name;
3574 desc = avcodec_descriptor_get(in_codec->id);
3576 in_codec_name = desc->name;
3577 if (!strcmp(decoder_name, in_codec_name))
3578 decoder_name = "native";
3582 encoder_name = out_codec->name;
3583 desc = avcodec_descriptor_get(out_codec->id);
3585 out_codec_name = desc->name;
3586 if (!strcmp(encoder_name, out_codec_name))
3587 encoder_name = "native";
3590 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3591 in_codec_name, decoder_name,
3592 out_codec_name, encoder_name);
3594 av_log(NULL, AV_LOG_INFO, "\n");
3598 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3602 transcode_init_done = 1;
3607 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3608 static int need_output(void)
/* Scan every output stream; a stream still "needs" output unless it is
 * finished or its file has already reached the -fs (limit_filesize) cap. */
3612 for (i = 0; i < nb_output_streams; i++) {
3613 OutputStream *ost = output_streams[i];
3614 OutputFile *of = output_files[ost->file_index];
3615 AVFormatContext *os = output_files[ost->file_index]->ctx;
3617 if (ost->finished ||
3618 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached for this stream: close every stream of the
 * containing output file so the whole file can be finalized. */
3620 if (ost->frame_number >= ost->max_frames) {
3622 for (j = 0; j < of->ctx->nb_streams; j++)
3623 close_output_stream(output_streams[of->ost_index + j]);
3634 * Select the output stream to process.
3636 * @return selected output stream, or NULL if none available
3638 static OutputStream *choose_output(void)
3641 int64_t opts_min = INT64_MAX;
3642 OutputStream *ost_min = NULL;
3644 for (i = 0; i < nb_output_streams; i++) {
3645 OutputStream *ost = output_streams[i];
/* Streams with no DTS yet compare as INT64_MIN so they are serviced first.
 * The rescale target time base is on an elided line — presumably
 * AV_TIME_BASE_Q so all streams compare in a common base; confirm. */
3646 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3647 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3649 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3650 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Track the unfinished stream with the smallest timestamp; if that stream
 * is currently marked unavailable the caller gets NULL and must retry. */
3652 if (!ost->finished && opts < opts_min) {
3654 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios.
 * Failures of tcgetattr are silently ignored (e.g. stdin not a tty). */
3660 static void set_tty_echo(int on)
3664 if (tcgetattr(0, &tty) == 0) {
3665 if (on) tty.c_lflag |= ECHO;
3666 else tty.c_lflag &= ~ECHO;
3667 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin for interactive single-key commands during transcoding.
 * Returns AVERROR_EXIT to request termination (signal received or 'q'
 * pressed on an elided branch), 0 otherwise. Keys are only read at most
 * once every 100 ms and never in daemon mode. */
3672 static int check_keyboard_interaction(int64_t cur_time)
3675 static int64_t last_time;
3676 if (received_nb_signals)
3677 return AVERROR_EXIT;
3678 /* read_key() returns 0 on EOF */
3679 if(cur_time - last_time >= 100000 && !run_as_daemon){
3681 last_time = cur_time;
3685 return AVERROR_EXIT;
/* Simple toggles: verbosity up/down, QP-histogram display. */
3686 if (key == '+') av_log_set_level(av_log_get_level()+10);
3687 if (key == '-') av_log_set_level(av_log_get_level()-10);
3688 if (key == 's') qp_hist ^= 1;
/* Packet/hex dump cycling ('h' key on an elided line): off -> pkt dump
 * -> pkt+hex dump -> off. */
3691 do_hex_dump = do_pkt_dump = 0;
3692 } else if(do_pkt_dump){
3696 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filter command line from the terminal and send it to
 * the first matching filter ('c', AVFILTER_CMD_FLAG_ONE) or to all
 * matching filters ('C'); a negative <time> queues it for later. */
3698 if (key == 'c' || key == 'C'){
3699 char buf[4096], target[64], command[256], arg[256] = {0};
3702 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3705 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3710 fprintf(stderr, "\n");
3712 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3713 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3714 target, time, command, arg);
3715 for (i = 0; i < nb_filtergraphs; i++) {
3716 FilterGraph *fg = filtergraphs[i];
3719 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3720 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3721 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3722 } else if (key == 'c') {
3723 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3724 ret = AVERROR_PATCHWELCOME;
3726 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3728 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3733 av_log(NULL, AV_LOG_ERROR,
3734 "Parse error, at least 3 arguments were expected, "
3735 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle ('d', shift left and skip crash-prone visualisation
 * flags) or prompt for ('D') a codec debug bitmask, then apply it to all
 * input decoders and output encoders. Note: uses the deprecated
 * AVStream.codec context for inputs. */
3738 if (key == 'd' || key == 'D'){
3741 debug = input_streams[0]->st->codec->debug<<1;
3742 if(!debug) debug = 1;
3743 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3750 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3755 fprintf(stderr, "\n");
3756 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3757 fprintf(stderr,"error parsing debug value\n");
3759 for(i=0;i<nb_input_streams;i++) {
3760 input_streams[i]->st->codec->debug = debug;
3762 for(i=0;i<nb_output_streams;i++) {
3763 OutputStream *ost = output_streams[i];
3764 ost->enc_ctx->debug = debug;
3766 if(debug) av_log_set_level(AV_LOG_DEBUG);
3767 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unrecognized keys, on an elided condition): print key help. */
3770 fprintf(stderr, "key function\n"
3771 "? show this help\n"
3772 "+ increase verbosity\n"
3773 "- decrease verbosity\n"
3774 "c Send command to first matching filter supporting it\n"
3775 "C Send/Que command to all matching filters\n"
3776 "D cycle through available debug modes\n"
3777 "h dump packets/hex press to cycle through the 3 states\n"
3779 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. Read errors
 * (including EOF) are propagated to the receiver via
 * av_thread_message_queue_set_err_recv(). */
3786 static void *input_thread(void *arg)
3789 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3794 ret = av_read_frame(f->ctx, &pkt);
3796 if (ret == AVERROR(EAGAIN)) {
3801 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3804 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send filled the queue: fall back to a blocking send and
 * warn the user to raise -thread_queue_size. */
3805 if (flags && ret == AVERROR(EAGAIN)) {
3807 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3808 av_log(f->ctx, AV_LOG_WARNING,
3809 "Thread message queue blocking; consider raising the "
3810 "thread_queue_size option (current value: %d)\n",
3811 f->thread_queue_size);
/* Send failed: drop the packet (unref) and tell the receiver side. */
3814 if (ret != AVERROR_EOF)
3815 av_log(f->ctx, AV_LOG_ERROR,
3816 "Unable to send packet to main thread: %s\n",
3818 av_packet_unref(&pkt);
3819 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down all input demuxer threads: signal EOF to the sending side,
 * drain and unref any packets still queued, join each thread, then free
 * the message queue. Files without a queue are skipped. */
3827 static void free_input_threads(void)
3831 for (i = 0; i < nb_input_files; i++) {
3832 InputFile *f = input_files[i];
3835 if (!f || !f->in_thread_queue)
3837 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3838 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3839 av_packet_unref(&pkt);
3841 pthread_join(f->thread, NULL);
3843 av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread per input file. Skipped entirely when there
 * is a single input (the main thread reads directly in that case).
 * Returns 0 on success or a negative AVERROR on failure. */
3847 static int init_input_threads(void)
3851 if (nb_input_files == 1)
3854 for (i = 0; i < nb_input_files; i++) {
3855 InputFile *f = input_files[i];
/* Use non-blocking queue sends for inputs that cannot be paced safely:
 * non-seekable I/O, or formats other than lavfi when there is no pb. */
3857 if (f->ctx->pb ? !f->ctx->pb->seekable :
3858 strcmp(f->ctx->iformat->name, "lavfi"))
3859 f->non_blocking = 1;
3860 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3861 f->thread_queue_size, sizeof(AVPacket));
3865 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3866 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3867 av_thread_message_queue_free(&f->in_thread_queue);
3868 return AVERROR(ret);
/* Receive one packet from this file's demuxer-thread queue. The condition
 * selecting AV_THREAD_MESSAGE_NONBLOCK is on an elided line — presumably
 * f->non_blocking; confirm against the full source. */
3874 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3876 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3878 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file f: via the demuxer thread when
 * multiple inputs exist, otherwise directly with av_read_frame(). */
3882 static int get_input_packet(InputFile *f, AVPacket *pkt)
/* Real-time pacing loop (-re style): compare each stream's DTS, rescaled
 * to microseconds, against elapsed wallclock since the stream started.
 * The guard condition before the EAGAIN is on an elided line — presumably
 * pts > now under a rate-emulation flag; confirm. */
3886 for (i = 0; i < f->nb_streams; i++) {
3887 InputStream *ist = input_streams[f->ist_index + i];
3888 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3889 int64_t now = av_gettime_relative() - ist->start;
3891 return AVERROR(EAGAIN);
3896 if (nb_input_files > 1)
3897 return get_input_packet_mt(f, pkt);
3899 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step hit EAGAIN somewhere. */
3902 static int got_eagain(void)
3905 for (i = 0; i < nb_output_streams; i++)
3906 if (output_streams[i]->unavailable)
/* Clear the per-input-file eagain flags and per-output-stream
 * unavailable flags so the next scheduling pass starts fresh. */
3911 static void reset_eagain(void)
3914 for (i = 0; i < nb_input_files; i++)
3915 input_files[i]->eagain = 0;
3916 for (i = 0; i < nb_output_streams; i++)
3917 output_streams[i]->unavailable = 0;
3920 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Set *duration to max(tmp, *duration), comparing across the two time
 * bases with av_compare_ts(), and return the time base that *duration is
 * expressed in afterwards. (The comment above and the visible returns
 * cover the "tmp wins" paths; the "keep *duration" path is elided.) */
3921 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3922 AVRational time_base)
3928 return tmp_time_base;
3931 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3934 return tmp_time_base;
/* Seek input file back to its start for -stream_loop: flush all active
 * decoders, then estimate the file's total duration (including the last
 * frame, which max_pts - min_pts alone does not cover) so timestamps of
 * the next iteration can be offset correctly. */
3940 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3943 AVCodecContext *avctx;
3944 int i, ret, has_audio = 0;
3945 int64_t duration = 0;
3947 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: drain and flush every decoder, and note whether any audio
 * stream produced samples (audio gives an exact last-frame length). */
3951 for (i = 0; i < ifile->nb_streams; i++) {
3952 ist = input_streams[ifile->ist_index + i];
3953 avctx = ist->dec_ctx;
3956 if (ist->decoding_needed) {
3957 process_input_packet(ist, NULL, 1);
3958 avcodec_flush_buffers(avctx);
3961 /* duration is the length of the last frame in a stream
3962 * when audio stream is present we don't care about
3963 * last video frame length because it's not defined exactly */
3964 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's last-frame duration — from sample
 * count for audio, from (forced or average) frame rate for others, with
 * 1 tick as a last resort — and fold it into the file duration. */
3968 for (i = 0; i < ifile->nb_streams; i++) {
3969 ist = input_streams[ifile->ist_index + i];
3970 avctx = ist->dec_ctx;
3973 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3974 AVRational sample_rate = {1, avctx->sample_rate};
3976 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3980 if (ist->framerate.num) {
3981 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3982 } else if (ist->st->avg_frame_rate.num) {
3983 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3984 } else duration = 1;
3986 if (!ifile->duration)
3987 ifile->time_base = ist->st->time_base;
3988 /* the total duration of the stream, max_pts - min_pts is
3989 * the duration of the stream without the last frame */
3990 duration += ist->max_pts - ist->min_pts;
3991 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented per iteration (decrement elided);
 * negative means loop forever. */
3995 if (ifile->loop > 0)
4003 * - 0 -- one packet was read and processed
4004 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4005 * this function should be called again
4006 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file (see the return
 * contract documented above: 0, AVERROR(EAGAIN), or AVERROR_EOF). */
4008 static int process_input(int file_index)
4010 InputFile *ifile = input_files[file_index];
4011 AVFormatContext *is;
/* Pull the next packet (possibly paced or from the demuxer thread). */
4019 ret = get_input_packet(ifile, &pkt);
4021 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on read failure, seek back to the start and retry. */
4025 if (ret < 0 && ifile->loop) {
4026 if ((ret = seek_to_start(ifile, is)) < 0)
4028 ret = get_input_packet(ifile, &pkt);
4029 if (ret == AVERROR(EAGAIN)) {
/* Hard read error or EOF: flush every active decoder for this file and
 * finish non-lavfi outputs fed by it, then report EOF via eof_reached. */
4035 if (ret != AVERROR_EOF) {
4036 print_error(is->filename, ret);
4041 for (i = 0; i < ifile->nb_streams; i++) {
4042 ist = input_streams[ifile->ist_index + i];
4043 if (ist->decoding_needed) {
4044 ret = process_input_packet(ist, NULL, 0);
4049 /* mark all outputs that don't go through lavfi as finished */
4050 for (j = 0; j < nb_output_streams; j++) {
4051 OutputStream *ost = output_streams[j];
4053 if (ost->source_index == ifile->ist_index + i &&
4054 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4055 finish_output_stream(ost);
4059 ifile->eof_reached = 1;
4060 return AVERROR(EAGAIN);
4066 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4067 is->streams[pkt.stream_index]);
4069 /* the following test is needed in case new streams appear
4070 dynamically in stream : we ignore them */
4071 if (pkt.stream_index >= ifile->nb_streams) {
4072 report_new_stream(file_index, &pkt);
4073 goto discard_packet;
4076 ist = input_streams[ifile->ist_index + pkt.stream_index];
4078 ist->data_size += pkt.size;
/* Discarded streams (and an elided condition) skip all processing. */
4082 goto discard_packet;
4084 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4085 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4090 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4091 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4092 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4093 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4094 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4095 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4096 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4097 av_ts2str(input_files[ist->file_index]->ts_offset),
4098 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits:
 * subtract one wrap period from pts/dts that lie beyond half a period
 * past the start time. */
4101 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4102 int64_t stime, stime2;
4103 // Correcting starttime based on the enabled streams
4104 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4105 // so we instead do it here as part of discontinuity handling
4106 if ( ist->next_dts == AV_NOPTS_VALUE
4107 && ifile->ts_offset == -is->start_time
4108 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4109 int64_t new_start_time = INT64_MAX;
4110 for (i=0; i<is->nb_streams; i++) {
4111 AVStream *st = is->streams[i];
4112 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4114 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4116 if (new_start_time > is->start_time) {
4117 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4118 ifile->ts_offset = -new_start_time;
4122 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4123 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4124 ist->wrap_correction_done = 1;
4126 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4127 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4128 ist->wrap_correction_done = 0;
4130 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4131 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4132 ist->wrap_correction_done = 0;
4136 /* add the stream-global side data to the first packet */
4137 if (ist->nb_packets == 1) {
4138 if (ist->st->nb_side_data)
4139 av_packet_split_side_data(&pkt);
4140 for (i = 0; i < ist->st->nb_side_data; i++) {
4141 AVPacketSideData *src_sd = &ist->st->side_data[i];
4144 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4146 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4149 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4153 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's ts_offset and any -itsscale factor. */
4157 if (pkt.dts != AV_NOPTS_VALUE)
4158 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4159 if (pkt.pts != AV_NOPTS_VALUE)
4160 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4162 if (pkt.pts != AV_NOPTS_VALUE)
4163 pkt.pts *= ist->ts_scale;
4164 if (pkt.dts != AV_NOPTS_VALUE)
4165 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity handling: compare this packet's DTS against
 * the file's last seen timestamp for TS_DISCONT formats. */
4167 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4168 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4169 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4170 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4171 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4172 int64_t delta = pkt_dts - ifile->last_ts;
4173 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4174 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4175 ifile->ts_offset -= delta;
4176 av_log(NULL, AV_LOG_DEBUG,
4177 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4178 delta, ifile->ts_offset);
4179 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4180 if (pkt.pts != AV_NOPTS_VALUE)
4181 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset timestamps by the accumulated file duration when looping. */
4185 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4186 if (pkt.pts != AV_NOPTS_VALUE) {
4187 pkt.pts += duration;
4188 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4189 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4192 if (pkt.dts != AV_NOPTS_VALUE)
4193 pkt.dts += duration;
/* Intra-stream discontinuity handling against the predicted next_dts:
 * large deltas adjust ts_offset (TS_DISCONT) or drop the timestamps. */
4195 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4196 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4197 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4198 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4200 int64_t delta = pkt_dts - ist->next_dts;
4201 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4202 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4203 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4204 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4205 ifile->ts_offset -= delta;
4206 av_log(NULL, AV_LOG_DEBUG,
4207 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4208 delta, ifile->ts_offset);
4209 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4210 if (pkt.pts != AV_NOPTS_VALUE)
4211 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4214 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4215 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4216 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4217 pkt.dts = AV_NOPTS_VALUE;
4219 if (pkt.pts != AV_NOPTS_VALUE){
4220 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4221 delta = pkt_pts - ist->next_dts;
4222 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4223 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4224 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4225 pkt.pts = AV_NOPTS_VALUE;
/* Remember this file's last DTS for the inter-stream check above. */
4231 if (pkt.dts != AV_NOPTS_VALUE)
4232 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4235 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4236 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4237 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4238 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4239 av_ts2str(input_files[ist->file_index]->ts_offset),
4240 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4243 sub2video_heartbeat(ist, pkt.pts);
/* Hand the packet to decode / streamcopy, then release our reference. */
4245 process_input_packet(ist, &pkt, 0);
4248 av_packet_unref(&pkt);
4254 * Perform a step of transcoding for the specified filter graph.
4256 * @param[in] graph filter graph to consider
4257 * @param[out] best_ist input stream where a frame would allow to continue
4258 * @return 0 for success, <0 for error
/* (See contract above.) Ask the graph for its oldest queued frame; on
 * success reap the filtered frames, on EOF reap in flush mode and close
 * all of the graph's outputs. On EAGAIN, pick as *best_ist the input
 * whose buffersrc has the most failed requests — feeding it is most
 * likely to unblock the graph. If no input can deliver (all at
 * EAGAIN/EOF), mark the graph's outputs unavailable instead. */
4260 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4263 int nb_requests, nb_requests_max = 0;
4264 InputFilter *ifilter;
4268 ret = avfilter_graph_request_oldest(graph->graph);
4270 return reap_filters(0);
4272 if (ret == AVERROR_EOF) {
4273 ret = reap_filters(1);
4274 for (i = 0; i < graph->nb_outputs; i++)
4275 close_output_stream(graph->outputs[i]->ost);
4278 if (ret != AVERROR(EAGAIN))
4281 for (i = 0; i < graph->nb_inputs; i++) {
4282 ifilter = graph->inputs[i];
4284 if (input_files[ist->file_index]->eagain ||
4285 input_files[ist->file_index]->eof_reached)
4287 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4288 if (nb_requests > nb_requests_max) {
4289 nb_requests_max = nb_requests;
4295 for (i = 0; i < graph->nb_outputs; i++)
4296 graph->outputs[i]->ost->unavailable = 1;
4302 * Run a single step of transcoding.
4304 * @return 0 for success, <0 for error
/* (See contract above.) Pick the most urgent output stream; for
 * filtered streams let the graph decide which input to feed, otherwise
 * read from the stream's source input file, then reap filtered frames. */
4306 static int transcode_step(void)
4312 ost = choose_output();
4319 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4324 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4329 av_assert0(ost->source_index >= 0);
4330 ist = input_streams[ost->source_index];
4333 ret = process_input(ist->file_index);
/* EAGAIN from the input: mark this output unavailable only if the file
 * itself is starved, so the scheduler can try other streams. */
4334 if (ret == AVERROR(EAGAIN)) {
4335 if (input_files[ist->file_index]->eagain)
4336 ost->unavailable = 1;
/* EOF of an input is not an error at this level. */
4341 return ret == AVERROR_EOF ? 0 : ret;
4343 return reap_filters(0);
4347 * The following code is the main loop of the file converter
/* Main conversion loop: initialize, run transcode_step() until no output
 * is needed or a termination signal arrives, then flush decoders, write
 * trailers, print the final report and release per-stream resources. */
4349 static int transcode(void)
4352 AVFormatContext *os;
4355 int64_t timer_start;
4356 int64_t total_packets_written = 0;
4358 ret = transcode_init();
4362 if (stdin_interaction) {
4363 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4366 timer_start = av_gettime_relative();
4369 if ((ret = init_input_threads()) < 0)
4373 while (!received_sigterm) {
4374 int64_t cur_time= av_gettime_relative();
4376 /* if 'q' pressed, exits */
4377 if (stdin_interaction)
4378 if (check_keyboard_interaction(cur_time) < 0)
4381 /* check if there's any stream where output is still needed */
4382 if (!need_output()) {
4383 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4387 ret = transcode_step();
4388 if (ret < 0 && ret != AVERROR_EOF) {
4390 av_strerror(ret, errbuf, sizeof(errbuf));
4392 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4396 /* dump report by using the output first video and audio streams */
4397 print_report(0, timer_start, cur_time);
4400 free_input_threads();
4403 /* at the end of stream, we must flush the decoder buffers */
4404 for (i = 0; i < nb_input_streams; i++) {
4405 ist = input_streams[i];
4406 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4407 process_input_packet(ist, NULL, 0);
4414 /* write the trailer if needed and close file */
4415 for (i = 0; i < nb_output_files; i++) {
4416 os = output_files[i]->ctx;
/* A file whose header was never written cannot take a trailer; warn. */
4417 if (!output_files[i]->header_written) {
4418 av_log(NULL, AV_LOG_ERROR,
4419 "Nothing was written into output file %d (%s), because "
4420 "at least one of its streams received no packets.\n",
4424 if ((ret = av_write_trailer(os)) < 0) {
4425 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4431 /* dump report by using the first video and audio streams */
4432 print_report(1, timer_start, av_gettime_relative());
4434 /* close each encoder */
4435 for (i = 0; i < nb_output_streams; i++) {
4436 ost = output_streams[i];
4437 if (ost->encoding_needed) {
4438 av_freep(&ost->enc_ctx->stats_in);
4440 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing at all was written. */
4443 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4444 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4448 /* close each decoder */
4449 for (i = 0; i < nb_input_streams; i++) {
4450 ist = input_streams[i];
4451 if (ist->decoding_needed) {
4452 avcodec_close(ist->dec_ctx);
4453 if (ist->hwaccel_uninit)
4454 ist->hwaccel_uninit(ist->dec_ctx);
4458 av_buffer_unref(&hw_device_ctx);
/* Cleanup path (reached on both success and failure): join input
 * threads and release per-output-stream allocations. */
4465 free_input_threads();
4468 if (output_streams) {
4469 for (i = 0; i < nb_output_streams; i++) {
4470 ost = output_streams[i];
4473 if (fclose(ost->logfile))
4474 av_log(NULL, AV_LOG_ERROR,
4475 "Error closing logfile, loss of information possible: %s\n",
4476 av_err2str(AVERROR(errno)));
4477 ost->logfile = NULL;
4479 av_freep(&ost->forced_kf_pts);
4480 av_freep(&ost->apad);
4481 av_freep(&ost->disposition);
4482 av_dict_free(&ost->encoder_opts);
4483 av_dict_free(&ost->sws_dict);
4484 av_dict_free(&ost->swr_opts);
4485 av_dict_free(&ost->resample_opts);
/* Return this process's user CPU time in microseconds: getrusage on
 * POSIX, GetProcessTimes on Windows (FILETIME is in 100 ns units, hence
 * the division by 10), falling back to wallclock where neither exists. */
4493 static int64_t getutime(void)
4496 struct rusage rusage;
4498 getrusage(RUSAGE_SELF, &rusage);
4499 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4500 #elif HAVE_GETPROCESSTIMES
4502 FILETIME c, e, k, u;
4503 proc = GetCurrentProcess();
4504 GetProcessTimes(proc, &c, &e, &k, &u);
4505 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4507 return av_gettime_relative();
/* Return peak memory usage in bytes: ru_maxrss (kilobytes, hence *1024)
 * on POSIX, PeakPagefileUsage on Windows; platforms with neither return
 * an elided fallback value. */
4511 static int64_t getmaxrss(void)
4513 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4514 struct rusage rusage;
4515 getrusage(RUSAGE_SELF, &rusage);
4516 return (int64_t)rusage.ru_maxrss * 1024;
4517 #elif HAVE_GETPROCESSMEMORYINFO
4519 PROCESS_MEMORY_COUNTERS memcounters;
4520 proc = GetCurrentProcess();
4521 memcounters.cb = sizeof(memcounters);
4522 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4523 return memcounters.PeakPagefileUsage;
/* Log callback that discards all messages; installed by main() when the
 * program is started with '-d' (daemon mode). */
4529 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging and cleanup handlers, register all
 * components, parse the command line into input/output files, run
 * transcode(), then exit with the appropriate status code. */
4533 int main(int argc, char **argv)
4540 register_exit(ffmpeg_cleanup);
4542 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4544 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4545 parse_loglevel(argc, argv, options);
/* '-d' as the first argument: run as daemon and silence all logging. */
4547 if(argc>1 && !strcmp(argv[1], "-d")){
4549 av_log_set_callback(log_callback_null);
/* Register codecs, devices, filters and the network layer. */
4554 avcodec_register_all();
4556 avdevice_register_all();
4558 avfilter_register_all();
4560 avformat_network_init();
4562 show_banner(argc, argv, options);
4564 /* parse options and open all input/output files */
4565 ret = ffmpeg_parse_options(argc, argv);
4569 if (nb_output_files <= 0 && nb_input_files == 0) {
4571 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4575 /* file converter / grab */
4576 if (nb_output_files <= 0) {
4577 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4581 //     if (nb_input_files == 0) {
4582 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* RTP outputs get special handling (the action is on an elided line). */
4586 for (i = 0; i < nb_output_files; i++) {
4587 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the conversion in user CPU time for the -benchmark report. */
4591 current_time = ti = getutime();
4592 if (transcode() < 0)
4594 ti = getutime() - ti;
4596 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4598 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4599 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail if the decode error ratio exceeds the limit. */
4600 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4603 exit_program(received_nb_signals ? 255 : main_return_code);
4604 return main_return_code;