2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by the cmdutils banner/version machinery. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for -vstats; opened lazily the first time do_video_stats() runs. */
112 static FILE *vstats_file;
/* Constant names usable in -force_key_frames expressions (FKF_* indices). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Runtime flags and transcode statistics. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
132 static int want_sdp = 1;
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle payloads; allocated in do_subtitle_out(). */
137 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static, presumably shared with the other
   ffmpeg_* tool sources — confirm against their extern declarations. */
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
149 FilterGraph **filtergraphs;
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
156 static int restore_tty;
160 static void free_input_threads(void);
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
/* Reset ist->sub2video.frame to a fresh, fully zeroed AV_PIX_FMT_RGB32 canvas.
 * The size comes from the decoder when known, otherwise from the configured
 * sub2video w/h. NOTE(review): the declaration of `ret` and the error/success
 * return statements are not visible in this excerpt. */
168 static int sub2video_get_blank_frame(InputStream *ist)
171 AVFrame *frame = ist->sub2video.frame;
173 av_frame_unref(frame);
174 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
176 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* Request a 32-byte-aligned buffer, then clear plane 0 (blank/transparent). */
177 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap subtitle rectangle onto the RGB32 canvas.
 * The rectangle's pixels are palette indices (r->data[0]) resolved through the
 * 32-bit palette in r->data[1]. Non-bitmap or out-of-bounds rectangles are
 * warned about and presumably skipped (the early returns are not shown here). */
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186 uint32_t *pal, *dst2;
190 if (r->type != SUBTITLE_BITMAP) {
191 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner (4 bytes per RGB32 pixel). */
201 dst += r->y * dst_linesize + r->x * 4;
203 pal = (uint32_t *)r->data[1];
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
/* Palette lookup for each source pixel. */
207 for (x = 0; x < r->w; x++)
208 *(dst2++) = pal[*(src2++)];
210 src += r->linesize[0];
/* Stamp the sub2video frame with `pts` and feed it to every filtergraph input
 * attached to this stream. KEEP_REF keeps our copy valid for later re-sends
 * (heartbeats); PUSH asks buffersrc to process it immediately. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
/* The canvas must already have been allocated by sub2video_get_blank_frame(). */
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded subtitle onto a blank canvas and push it to the filters.
 * When `sub` carries rectangles, their display window (in AV_TIME_BASE units
 * relative to sub->pts) is rescaled to the stream time base; otherwise the
 * previous end_pts is reused (the NULL-sub branch is partly elided here). */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 AVFrame *frame = ist->sub2video.frame;
233 int64_t pts, end_pts;
238 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 num_rects = sub->num_rects;
244 pts = ist->sub2video.end_pts;
/* Get a fresh transparent canvas; on failure, log and bail out. */
248 if (sub2video_get_blank_frame(ist) < 0) {
249 av_log(ist->dec_ctx, AV_LOG_ERROR,
250 "Impossible to get a blank canvas.\n");
253 dst = frame->data [0];
254 dst_linesize = frame->linesize[0];
255 for (i = 0; i < num_rects; i++)
256 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257 sub2video_push_ref(ist, pts);
258 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame so video filters downstream are not
 * starved while waiting for a subtitle frame that may be far in the future. */
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 InputFile *infile = input_files[ist->file_index];
267 /* When a frame is read from a file, examine all sub2video streams in
268 the same file and send the sub2video frame again. Otherwise, decoded
269 video frames could be accumulating in the filter graph while a filter
270 (possibly overlay) is desperately waiting for a subtitle frame. */
271 for (i = 0; i < infile->nb_streams; i++) {
272 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources (presumably `continue` here). */
273 if (!ist2->sub2video.frame)
275 /* subtitles seem to be usually muxed ahead of other streams;
276 if not, subtracting a larger time here is necessary */
277 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278 /* do not send the heartbeat frame if the subtitle is already ahead */
279 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's display window (or no canvas yet): clear it. */
281 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282 sub2video_update(ist2, NULL);
/* Only push a heartbeat if some filter actually failed a frame request. */
283 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video source: clear any subtitle still on
 * screen, then send NULL (EOF) to each attached buffersrc filter. */
290 static void sub2video_flush(InputStream *ist)
294 if (ist->sub2video.end_pts < INT64_MAX)
295 sub2video_update(ist, NULL);
296 for (i = 0; i < ist->nb_filters; i++)
297 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
300 /* end of sub2video hack */
/* Restore the terminal state saved in `oldtty`. Async-signal-safe by design
 * (only tcsetattr plus a no-op av_log flush); callable from signal handlers. */
302 static void term_exit_sigsafe(void)
306 tcsetattr (0, TCSANOW, &oldtty);
312 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping, written from signal handlers — hence volatile.
 * NOTE(review): plain volatile ints, not C11 atomics; presumably accepted
 * here because handlers only ever set them — confirm against upstream. */
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
318 static volatile int transcode_init_done = 0;
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
/* Async-signal-safe termination handler: records the signal and count; after
 * more than 3 signals, prints directly with write(2) (printf is not
 * signal-safe) and presumably hard-exits on an elided line. */
323 sigterm_handler(int sig)
325 received_sigterm = sig;
326 received_nb_signals++;
328 if(received_nb_signals > 3) {
329 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330 strlen("Received > 3 system signals, hard exiting\n"));
336 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: maps console events onto the same POSIX
 * signal path used elsewhere (Ctrl-C/Break -> SIGINT, close/logoff/shutdown
 * -> SIGTERM), then stalls so the main thread can clean up before Windows
 * hard-kills the process. */
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
344 case CTRL_BREAK_EVENT:
345 sigterm_handler(SIGINT);
348 case CTRL_CLOSE_EVENT:
349 case CTRL_LOGOFF_EVENT:
350 case CTRL_SHUTDOWN_EVENT:
351 sigterm_handler(SIGTERM);
352 /* Basically, with these 3 events, when we return from this method the
353 process is hard terminated, so stall as long as we need to
354 to try and let the main thread(s) clean up and gracefully terminate
355 (we have at most 5 seconds, but should be done far before that). */
356 while (!ffmpeg_exited) {
362 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior of term_init() — its signature is outside this
 * excerpt. Puts the controlling terminal into raw-ish mode for interactive
 * keypress handling, then installs the signal handlers. */
371 if (!run_as_daemon && stdin_interaction) {
373 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control and local echo/canonical mode. */
377 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
378 |INLCR|IGNCR|ICRNL|IXON);
379 tty.c_oflag |= OPOST;
380 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
381 tty.c_cflag &= ~(CSIZE|PARENB);
386 tcsetattr (0, TCSANOW, &tty);
388 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
392 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
393 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU-time limit exceeded — treat like a termination request. */
395 signal(SIGXCPU, sigterm_handler);
397 #if HAVE_SETCONSOLECTRLHANDLER
398 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
402 /* read a key without blocking */
/* POSIX path: poll stdin with select(); Windows path: PeekNamedPipe or
 * console API depending on whether stdin is a pipe. Returns the key read,
 * or presumably -1/EOF codes on the elided lines. */
403 static int read_key(void)
415 n = select(1, &rfds, NULL, NULL, &tv);
424 # if HAVE_PEEKNAMEDPIPE
426 static HANDLE input_handle;
429 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles => stdin is a pipe. */
430 is_pipe = !GetConsoleMode(input_handle, &dw);
434 /* When running under a GUI, you will end here. */
435 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
436 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has arrived.
 * Before init completes (transcode_init_done == 0) a single signal aborts;
 * afterwards a second one is required. */
454 static int decode_interrupt_cb(void *ctx)
456 return received_nb_signals > transcode_init_done;
/* Registered with libavformat contexts so network/file reads can be interrupted. */
459 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, run on exit: frees filtergraphs, output files/streams,
 * input files/streams, the vstats file and the global tables, then reports
 * how the run ended. Order matters: muxers are closed before codec contexts
 * and per-stream buffers are released. */
461 static void ffmpeg_cleanup(int ret)
466 int maxrss = getmaxrss() / 1024;
467 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then per-input/output names and arrays --- */
470 for (i = 0; i < nb_filtergraphs; i++) {
471 FilterGraph *fg = filtergraphs[i];
472 avfilter_graph_free(&fg->graph);
473 for (j = 0; j < fg->nb_inputs; j++) {
474 av_freep(&fg->inputs[j]->name);
475 av_freep(&fg->inputs[j]);
477 av_freep(&fg->inputs);
478 for (j = 0; j < fg->nb_outputs; j++) {
479 av_freep(&fg->outputs[j]->name);
480 av_freep(&fg->outputs[j]);
482 av_freep(&fg->outputs);
483 av_freep(&fg->graph_desc);
485 av_freep(&filtergraphs[i]);
487 av_freep(&filtergraphs);
489 av_freep(&subtitle_out);
/* --- output files: close the underlying AVIO (unless AVFMT_NOFILE) and
 *     free the muxer context and its options dictionary --- */
492 for (i = 0; i < nb_output_files; i++) {
493 OutputFile *of = output_files[i];
498 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
500 avformat_free_context(s);
501 av_dict_free(&of->opts);
503 av_freep(&output_files[i]);
/* --- output streams: bitstream filters, cached frames, parser, encoder --- */
505 for (i = 0; i < nb_output_streams; i++) {
506 OutputStream *ost = output_streams[i];
511 for (j = 0; j < ost->nb_bitstream_filters; j++)
512 av_bsf_free(&ost->bsf_ctx[j]);
513 av_freep(&ost->bsf_ctx);
514 av_freep(&ost->bsf_extradata_updated);
516 av_frame_free(&ost->filtered_frame);
517 av_frame_free(&ost->last_frame);
518 av_dict_free(&ost->encoder_opts);
520 av_parser_close(ost->parser);
521 avcodec_free_context(&ost->parser_avctx);
523 av_freep(&ost->forced_keyframes);
524 av_expr_free(ost->forced_keyframes_pexpr);
525 av_freep(&ost->avfilter);
526 av_freep(&ost->logfile_prefix);
528 av_freep(&ost->audio_channels_map);
529 ost->audio_channels_mapped = 0;
531 av_dict_free(&ost->sws_dict);
533 avcodec_free_context(&ost->enc_ctx);
534 avcodec_parameters_free(&ost->ref_par);
/* Drain and unref any packets still buffered for a never-initialized muxer. */
536 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
538 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
539 av_packet_unref(&pkt);
541 av_fifo_freep(&ost->muxing_queue);
543 av_freep(&output_streams[i]);
/* Input reader threads must stop before their demuxers are closed. */
546 free_input_threads();
548 for (i = 0; i < nb_input_files; i++) {
549 avformat_close_input(&input_files[i]->ctx);
550 av_freep(&input_files[i]);
552 for (i = 0; i < nb_input_streams; i++) {
553 InputStream *ist = input_streams[i];
555 av_frame_free(&ist->decoded_frame);
556 av_frame_free(&ist->filter_frame);
557 av_dict_free(&ist->decoder_opts);
558 avsubtitle_free(&ist->prev_sub.subtitle);
559 av_frame_free(&ist->sub2video.frame);
560 av_freep(&ist->filters);
561 av_freep(&ist->hwaccel_device);
562 av_freep(&ist->dts_buffer);
564 avcodec_free_context(&ist->dec_ctx);
566 av_freep(&input_streams[i]);
/* fclose flushes; a failure here can mean lost vstats data, so report it. */
570 if (fclose(vstats_file))
571 av_log(NULL, AV_LOG_ERROR,
572 "Error closing vstats file, loss of information possible: %s\n",
573 av_err2str(AVERROR(errno)));
575 av_freep(&vstats_filename);
577 av_freep(&input_streams);
578 av_freep(&input_files);
579 av_freep(&output_streams);
580 av_freep(&output_files);
584 avformat_network_deinit();
586 if (received_sigterm) {
587 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
588 (int) received_sigterm);
589 } else if (ret && transcode_init_done) {
590 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
596 void remove_avoptions(AVDictionary **a, AVDictionary *b)
598 AVDictionaryEntry *t = NULL;
600 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
601 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail hard if any option in m was not consumed: the first remaining entry
 * is reported as unknown. NOTE(review): the exit/abort call after the fatal
 * log is on a line not visible in this excerpt. */
605 void assert_avoptions(AVDictionary *m)
607 AVDictionaryEntry *t;
608 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
609 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
614 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the user CPU time elapsed since the previous call
 * under the printf-style label in `fmt`; a NULL fmt only resets the baseline
 * (current_time) without logging. */
619 static void update_benchmark(const char *fmt, ...)
621 if (do_benchmark_all) {
622 int64_t t = getutime();
628 vsnprintf(buf, sizeof(buf), fmt, va);
630 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
636 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
639 for (i = 0; i < nb_output_streams; i++) {
640 OutputStream *ost2 = output_streams[i];
641 ost2->finished |= ost == ost2 ? this_stream : others;
/* Hand one packet to the muxer, with all the timestamp sanitation ffmpeg
 * performs on the way: queueing while the muxer is uninitialized, -vsync drop
 * handling, max_frames limiting, quality side-data extraction, CFR duration
 * override, DTS<=PTS and DTS monotonicity repair, then interleaved write.
 * Takes ownership of *pkt (always unrefs/moves it). */
645 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
647 AVFormatContext *s = of->ctx;
648 AVStream *st = ost->st;
651 if (!of->header_written) {
653 /* the muxer is not initialized yet, buffer the packet */
/* Grow the FIFO (doubling, capped at max_muxing_queue_size) when full. */
654 if (!av_fifo_space(ost->muxing_queue)) {
655 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
656 ost->max_muxing_queue_size);
657 if (new_size <= av_fifo_size(ost->muxing_queue)) {
658 av_log(NULL, AV_LOG_ERROR,
659 "Too many packets buffered for output stream %d:%d.\n",
660 ost->file_index, ost->st->index);
663 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Ownership of the packet data moves into the queue. */
667 av_packet_move_ref(&tmp_pkt, pkt);
668 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
/* -vsync drop / negative -async: discard timestamps entirely. */
672 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
673 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
674 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
677 * Audio encoders may split the packets -- #frames in != #packets out.
678 * But there is no reordering, so we can limit the number of output packets
679 * by simply dropping them here.
680 * Counting encoded video frames needs to be done separately because of
681 * reordering, see do_video_out()
683 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
684 if (ost->frame_number >= ost->max_frames) {
685 av_packet_unref(pkt);
/* Pull encoder quality/PSNR stats out of the packet side data. */
690 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
692 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
694 ost->quality = sd ? AV_RL32(sd) : -1;
695 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
697 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
699 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration is dictated by the frame rate, not the encoder. */
704 if (ost->frame_rate.num && ost->is_cfr) {
705 if (pkt->duration > 0)
706 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
707 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
712 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* DTS must never exceed PTS; replace both by the median of
 * {pts, dts, last_mux_dts+1} (the sum-minus-min-minus-max trick). */
713 if (pkt->dts != AV_NOPTS_VALUE &&
714 pkt->pts != AV_NOPTS_VALUE &&
715 pkt->dts > pkt->pts) {
716 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
718 ost->file_index, ost->st->index);
720 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
721 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
722 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing DTS. VP9
 * stream copies are exempt (alt-ref frames legitimately repeat DTS). */
724 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
725 pkt->dts != AV_NOPTS_VALUE &&
726 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
727 ost->last_mux_dts != AV_NOPTS_VALUE) {
728 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
729 if (pkt->dts < max) {
730 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
731 av_log(s, loglevel, "Non-monotonous DTS in output stream "
732 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
733 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* With exit_on_error this is fatal (elided guard); otherwise clamp. */
735 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
738 av_log(s, loglevel, "changing to %"PRId64". This may result "
739 "in incorrect timestamps in the output file.\n",
741 if (pkt->pts >= pkt->dts)
742 pkt->pts = FFMAX(pkt->pts, max);
747 ost->last_mux_dts = pkt->dts;
749 ost->data_size += pkt->size;
750 ost->packets_written++;
752 pkt->stream_index = ost->index;
755 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
756 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
757 av_get_media_type_string(ost->enc_ctx->codec_type),
758 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
759 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* A write failure poisons the whole output file: finish all its streams. */
764 ret = av_interleaved_write_frame(s, pkt);
766 print_error("av_interleaved_write_frame()", ret);
767 main_return_code = 1;
768 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
770 av_packet_unref(pkt);
/* Mark this stream's encoder finished. The recording_time clamp implements
 * -shortest: the file stops when its shortest stream ends. NOTE(review): in
 * full context the clamp sits behind an of->shortest guard not visible here. */
773 static void close_output_stream(OutputStream *ost)
775 OutputFile *of = output_files[ost->file_index];
777 ost->finished |= ENCODER_FINISHED;
779 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
780 of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through the output bitstream-filter chain (if any), then
 * deliver it to write_packet(). With no filters, it is written directly.
 * The chain-walking loop structure (idx handling) is partly elided here. */
784 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
788 /* apply the output bitstream filters, if any */
789 if (ost->nb_bitstream_filters) {
792 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
798 /* get a packet from the previous filter up the chain */
799 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
800 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
801 * the api states this shouldn't happen after init(). Propagate it here to the
802 * muxer and to the next filters in the chain to workaround this.
803 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
804 * par_out->extradata and adapt muxers accordingly to get rid of this. */
/* Bit 0: extradata already propagated to the muxer for this filter. */
805 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
806 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
809 ost->bsf_extradata_updated[idx - 1] |= 1;
/* EAGAIN: this filter needs more input; step back up the chain. */
811 if (ret == AVERROR(EAGAIN)) {
818 /* send it to the next filter down the chain or to the muxer */
819 if (idx < ost->nb_bitstream_filters) {
820 /* HACK/FIXME! - See above */
/* Bit 1: extradata already propagated to the next filter in the chain. */
821 if (!(ost->bsf_extradata_updated[idx] & 2)) {
822 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
825 ost->bsf_extradata_updated[idx] |= 2;
827 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
832 write_packet(of, pkt, ost);
/* No bitstream filters configured: straight to the muxer. */
835 write_packet(of, pkt, ost);
838 if (ret < 0 && ret != AVERROR_EOF) {
839 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
840 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Has this stream reached the output file's -t recording limit?
 * If so, close the stream. Callers treat a zero return as "stop encoding"
 * (see do_audio_out); the return statements are on elided lines. */
846 static int check_recording_time(OutputStream *ost)
848 OutputFile *of = output_files[ost->file_index];
850 if (of->recording_time != INT64_MAX &&
851 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
852 AV_TIME_BASE_Q) >= 0) {
853 close_output_stream(ost);
/* Encode one audio frame with the send/receive API and push every resulting
 * packet through output_packet(). sync_opts tracks the stream clock in
 * samples; untimestamped frames (or negative -async) are stamped from it. */
859 static void do_audio_out(OutputFile *of, OutputStream *ost,
862 AVCodecContext *enc = ost->enc_ctx;
866 av_init_packet(&pkt);
870 if (!check_recording_time(ost))
873 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
874 frame->pts = ost->sync_opts;
875 ost->sync_opts = frame->pts + frame->nb_samples;
876 ost->samples_encoded += frame->nb_samples;
877 ost->frames_encoded++;
878 av_assert0(pkt.size || !pkt.data);
879 av_assert0(pkt.size || !pkt.data);
880 update_benchmark(NULL);
882 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
883 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
884 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
885 enc->time_base.num, enc->time_base.den);
888 ret = avcodec_send_frame(enc, frame);
/* Drain loop: one send may yield zero or more packets. */
893 ret = avcodec_receive_packet(enc, &pkt);
894 if (ret == AVERROR(EAGAIN))
899 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Packets leave the encoder in enc->time_base; the muxer wants st->time_base. */
901 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
904 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
905 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
906 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
907 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
910 output_packet(of, &pkt, ost);
915 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded twice
 * (draw packet + clear packet, hence the nb loop). Timestamps are shifted by
 * the output start_time so -ss/-t behave, and start_display_time is folded
 * into pts because encoders require it to be 0. */
919 static void do_subtitle_out(OutputFile *of,
923 int subtitle_out_max_size = 1024 * 1024;
924 int subtitle_out_size, nb, i;
929 if (sub->pts == AV_NOPTS_VALUE) {
930 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the (file-global) 1 MiB encode buffer. */
939 subtitle_out = av_malloc(subtitle_out_max_size);
941 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
946 /* Note: DVB subtitle need one packet to draw them and one other
947 packet to clear them */
948 /* XXX: signal it in the codec context ? */
949 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
954 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
956 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
957 pts -= output_files[ost->file_index]->start_time;
958 for (i = 0; i < nb; i++) {
/* The encoder may clobber num_rects (e.g. for the DVB clear packet); save it. */
959 unsigned save_num_rects = sub->num_rects;
961 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
962 if (!check_recording_time(ost))
966 // start_display_time is required to be 0
967 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
968 sub->end_display_time -= sub->start_display_time;
969 sub->start_display_time = 0;
973 ost->frames_encoded++;
975 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
976 subtitle_out_max_size, sub);
978 sub->num_rects = save_num_rects;
979 if (subtitle_out_size < 0) {
980 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
984 av_init_packet(&pkt);
985 pkt.data = subtitle_out;
986 pkt.size = subtitle_out_size;
987 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
988 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
989 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
990 /* XXX: the pts correction is handled here. Maybe handling
991 it in the codec would be better */
/* 90 = 90kHz ticks per millisecond: first packet at display start,
 * second (clear) packet at display end. */
993 pkt.pts += 90 * sub->start_display_time;
995 pkt.pts += 90 * sub->end_display_time;
998 output_packet(of, &pkt, ost);
/* Encode one video frame, implementing -vsync: depending on the sync mode
 * and the drift between input and output clocks, the frame may be dropped,
 * emitted once, or duplicated (repeating the previous frame nb0_frames
 * times). Forced keyframes (-force_key_frames list/expression/"source") are
 * applied before encoding. Many guard/brace lines are elided in this view. */
1002 static void do_video_out(OutputFile *of,
1004 AVFrame *next_picture,
1007 int ret, format_video_sync;
1009 AVCodecContext *enc = ost->enc_ctx;
1010 AVCodecParameters *mux_par = ost->st->codecpar;
1011 int nb_frames, nb0_frames, i;
1012 double delta, delta0;
1013 double duration = 0;
1015 InputStream *ist = NULL;
1016 AVFilterContext *filter = ost->filter->filter;
1018 if (ost->source_index >= 0)
1019 ist = input_streams[ost->source_index];
/* Estimate the frame duration in encoder time-base ticks: filter frame rate,
 * clamped by the output frame rate, refined by the input packet duration. */
1021 if (filter->inputs[0]->frame_rate.num > 0 &&
1022 filter->inputs[0]->frame_rate.den > 0)
1023 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1025 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1026 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1028 if (!ost->filters_script &&
1032 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1033 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush call (no frame): estimate how many duplicates to emit from the
 * median of the last three nb0 counts. */
1036 if (!next_picture) {
1038 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1039 ost->last_nb0_frames[1],
1040 ost->last_nb0_frames[2]);
1042 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1043 delta = delta0 + duration;
1045 /* by default, we output a single frame */
1046 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO to a concrete mode based on the muxer's capabilities. */
1049 format_video_sync = video_sync_method;
1050 if (format_video_sync == VSYNC_AUTO) {
1051 if(!strcmp(of->ctx->oformat->name, "avi")) {
1052 format_video_sync = VSYNC_VFR;
1054 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1056 && format_video_sync == VSYNC_CFR
1057 && input_files[ist->file_index]->ctx->nb_streams == 1
1058 && input_files[ist->file_index]->input_ts_offset == 0) {
1059 format_video_sync = VSYNC_VSCFR;
1061 if (format_video_sync == VSYNC_CFR && copy_ts) {
1062 format_video_sync = VSYNC_VSCFR;
1065 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift: the frame is in the past w.r.t. the output clock. */
1069 format_video_sync != VSYNC_PASSTHROUGH &&
1070 format_video_sync != VSYNC_DROP) {
1071 if (delta0 < -0.6) {
1072 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1074 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1075 sync_ipts = ost->sync_opts;
/* Per-mode decision on how many frames to emit (nb_frames) and how many of
 * those repeat the previous picture (nb0_frames). */
1080 switch (format_video_sync) {
1082 if (ost->frame_number == 0 && delta0 >= 0.5) {
1083 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1086 ost->sync_opts = lrint(sync_ipts);
1089 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1090 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1092 } else if (delta < -1.1)
1094 else if (delta > 1.1) {
1095 nb_frames = lrintf(delta);
1097 nb0_frames = lrintf(delta0 - 0.6);
1103 else if (delta > 0.6)
1104 ost->sync_opts = lrint(sync_ipts);
1107 case VSYNC_PASSTHROUGH:
1108 ost->sync_opts = lrint(sync_ipts);
1115 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1116 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the nb0 history (used by the flush-time median above). */
1118 memmove(ost->last_nb0_frames + 1,
1119 ost->last_nb0_frames,
1120 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1121 ost->last_nb0_frames[0] = nb0_frames;
1123 if (nb0_frames == 0 && ost->last_dropped) {
1125 av_log(NULL, AV_LOG_VERBOSE,
1126 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1127 ost->frame_number, ost->st->index, ost->last_frame->pts);
1129 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1130 if (nb_frames > dts_error_threshold * 30) {
1131 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1135 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1136 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1138 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1140 /* duplicates frame if needed */
1141 for (i = 0; i < nb_frames; i++) {
1142 AVFrame *in_picture;
1143 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1147 if (i < nb0_frames && ost->last_frame) {
1148 in_picture = ost->last_frame;
1150 in_picture = next_picture;
1155 in_picture->pts = ost->sync_opts;
1158 if (!check_recording_time(ost))
1160 if (ost->frame_number >= ost->max_frames)
1164 #if FF_API_LAVF_FMT_RAWPICTURE
1165 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1166 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1167 /* raw pictures are written as AVPicture structure to
1168 avoid any copies. We support temporarily the older
1170 if (in_picture->interlaced_frame)
1171 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1173 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1174 pkt.data = (uint8_t *)in_picture;
1175 pkt.size = sizeof(AVPicture);
1176 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1177 pkt.flags |= AV_PKT_FLAG_KEY;
1179 output_packet(of, &pkt, ost);
1183 int forced_keyframe = 0;
1186 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1187 ost->top_field_first >= 0)
1188 in_picture->top_field_first = !!ost->top_field_first;
1190 if (in_picture->interlaced_frame) {
1191 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1192 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1194 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1196 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1198 in_picture->quality = enc->global_quality;
1199 in_picture->pict_type = 0;
/* Forced keyframe sources: explicit pts list, expression, or "source". */
1201 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1202 in_picture->pts * av_q2d(enc->time_base) : NAN;
1203 if (ost->forced_kf_index < ost->forced_kf_count &&
1204 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1205 ost->forced_kf_index++;
1206 forced_keyframe = 1;
1207 } else if (ost->forced_keyframes_pexpr) {
1209 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1210 res = av_expr_eval(ost->forced_keyframes_pexpr,
1211 ost->forced_keyframes_expr_const_values, NULL);
1212 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1213 ost->forced_keyframes_expr_const_values[FKF_N],
1214 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1215 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1216 ost->forced_keyframes_expr_const_values[FKF_T],
1217 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1220 forced_keyframe = 1;
1221 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1222 ost->forced_keyframes_expr_const_values[FKF_N];
1223 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1224 ost->forced_keyframes_expr_const_values[FKF_T];
1225 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1228 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1229 } else if ( ost->forced_keyframes
1230 && !strncmp(ost->forced_keyframes, "source", 6)
1231 && in_picture->key_frame==1) {
1232 forced_keyframe = 1;
1235 if (forced_keyframe) {
1236 in_picture->pict_type = AV_PICTURE_TYPE_I;
1237 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1240 update_benchmark(NULL);
1242 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1243 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1244 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1245 enc->time_base.num, enc->time_base.den);
1248 ost->frames_encoded++;
/* Send/receive encode loop; one frame may yield several packets. */
1250 ret = avcodec_send_frame(enc, in_picture);
1255 ret = avcodec_receive_packet(enc, &pkt);
1256 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1257 if (ret == AVERROR(EAGAIN))
1263 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1264 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1265 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1266 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1269 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1270 pkt.pts = ost->sync_opts;
1272 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1275 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1276 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1277 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1278 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1281 frame_size = pkt.size;
1282 output_packet(of, &pkt, ost);
1284 /* if two pass, output log */
1285 if (ost->logfile && enc->stats_out) {
1286 fprintf(ost->logfile, "%s", enc->stats_out);
1292 * For video, number of frames in == number of packets out.
1293 * But there may be reordering, so we can't throw away frames on encoder
1294 * flush, we need to limit them here, before they go into encoder.
1296 ost->frame_number++;
1298 if (vstats_filename && frame_size)
1299 do_video_stats(ost, frame_size);
/* Keep a reference to this frame so it can be re-encoded as a duplicate. */
1302 if (!ost->last_frame)
1303 ost->last_frame = av_frame_alloc();
1304 av_frame_unref(ost->last_frame);
1305 if (next_picture && ost->last_frame)
1306 av_frame_ref(ost->last_frame, next_picture);
1308 av_frame_free(&ost->last_frame);
1312 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* Map a normalized mean-squared error (error / (w*h*255^2)) to PSNR in dB. */
1316 static double psnr(double d)
1318 return -10.0 * log10(d);
/* Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, frame size, stream size, bitrates, picture type) for this
 * output stream to the global vstats file. */
1321 static void do_video_stats(OutputStream *ost, int frame_size)
1323 AVCodecContext *enc;
1325 double ti1, bitrate, avg_bitrate;
1327 /* this is executed just the first time do_video_stats is called */
1329 vstats_file = fopen(vstats_filename, "w");
1337 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1338 frame_number = ost->st->nb_frames;
1339 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1340 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only printed when the encoder collected error stats
 * (AV_CODEC_FLAG_PSNR) and the stored error value is valid. */
1342 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1343 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1345 fprintf(vstats_file,"f_size= %6d ", frame_size);
1346 /* compute pts value */
1347 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame; average from total bytes so far */
1351 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1352 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1353 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1354 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1355 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark the given output stream -- and every other stream of the same output
 * file -- as finished for both the encoder and the muxer. */
1359 static void finish_output_stream(OutputStream *ost)
1361 OutputFile *of = output_files[ost->file_index];
1364 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1367 for (i = 0; i < of->ctx->nb_streams; i++)
1368 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1373 * Get and encode new output from any of the filtergraphs, without causing
1376 * @return 0 for success, <0 for severe errors
/* Drain every output stream's buffersink of frames that are already
 * available (no extra filtering requested) and hand them to the audio or
 * video encoder.  With flush != 0, EOF from a video sink triggers a final
 * do_video_out(NULL) so the fps/encoder pipeline can be drained. */
1378 static int reap_filters(int flush)
1380 AVFrame *filtered_frame = NULL;
1383 /* Reap all buffers present in the buffer sinks */
1384 for (i = 0; i < nb_output_streams; i++) {
1385 OutputStream *ost = output_streams[i];
1386 OutputFile *of = output_files[ost->file_index];
1387 AVFilterContext *filter;
1388 AVCodecContext *enc = ost->enc_ctx;
1393 filter = ost->filter->filter;
/* lazily allocate the per-stream reusable frame */
1395 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1396 return AVERROR(ENOMEM);
1398 filtered_frame = ost->filtered_frame;
1401 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1402 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1403 AV_BUFFERSINK_FLAG_NO_REQUEST);
/* EAGAIN (nothing buffered) and EOF are expected; anything else is logged */
1405 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1406 av_log(NULL, AV_LOG_WARNING,
1407 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1408 } else if (flush && ret == AVERROR_EOF) {
1409 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1410 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1414 if (ost->finished) {
1415 av_frame_unref(filtered_frame);
/* Rescale pts into the encoder time base; float_pts keeps extra fractional
 * precision by temporarily widening the time base denominator. */
1418 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1419 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1420 AVRational tb = enc->time_base;
1421 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1423 tb.den <<= extra_bits;
1425 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1426 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1427 float_pts /= 1 << extra_bits;
1428 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1429 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1431 filtered_frame->pts =
1432 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1433 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1435 //if (ost->source_index >= 0)
1436 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1438 switch (filter->inputs[0]->type) {
1439 case AVMEDIA_TYPE_VIDEO:
1440 if (!ost->frame_aspect_ratio.num)
1441 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1444 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1445 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1447 enc->time_base.num, enc->time_base.den);
1450 do_video_out(of, ost, filtered_frame, float_pts);
1452 case AVMEDIA_TYPE_AUDIO:
/* The encoder cannot follow channel-count changes unless it advertises
 * AV_CODEC_CAP_PARAM_CHANGE, so a mismatch here is an error. */
1453 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1454 enc->channels != av_frame_get_channels(filtered_frame)) {
1455 av_log(NULL, AV_LOG_ERROR,
1456 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1459 do_audio_out(of, ost, filtered_frame);
1462 // TODO support subtitle filters
1466 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type output byte totals, muxing
 * overhead percentage, and (at verbose level) per-stream packet/frame
 * counts for every input and output file.
 * total_size: muxed output size in bytes, or <= 0 when unknown. */
1473 static void print_final_stats(int64_t total_size)
1475 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1476 uint64_t subtitle_size = 0;
1477 uint64_t data_size = 0;
1478 float percent = -1.0;
1482 for (i = 0; i < nb_output_streams; i++) {
1483 OutputStream *ost = output_streams[i];
1484 switch (ost->enc_ctx->codec_type) {
1485 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1486 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1487 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1488 default: other_size += ost->data_size; break;
1490 extra_size += ost->enc_ctx->extradata_size;
1491 data_size += ost->data_size;
/* use the non-deprecated AV_CODEC_FLAG_PASS2 (same value as the old
 * CODEC_FLAG_PASS2), consistent with AV_CODEC_FLAG_PASS1 in this mask */
1492 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1493 != AV_CODEC_FLAG_PASS1)
/* overhead percent only computable when sizes are known and consistent */
1497 if (data_size && total_size>0 && total_size >= data_size)
1498 percent = 100.0 * (total_size - data_size) / data_size;
1500 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1501 video_size / 1024.0,
1502 audio_size / 1024.0,
1503 subtitle_size / 1024.0,
1504 other_size / 1024.0,
1505 extra_size / 1024.0);
1507 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1509 av_log(NULL, AV_LOG_INFO, "unknown");
1510 av_log(NULL, AV_LOG_INFO, "\n");
1512 /* print verbose per-stream stats */
1513 for (i = 0; i < nb_input_files; i++) {
1514 InputFile *f = input_files[i];
1515 uint64_t total_packets = 0, total_size = 0;
1517 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1518 i, f->ctx->filename);
1520 for (j = 0; j < f->nb_streams; j++) {
1521 InputStream *ist = input_streams[f->ist_index + j];
1522 enum AVMediaType type = ist->dec_ctx->codec_type;
1524 total_size += ist->data_size;
1525 total_packets += ist->nb_packets;
1527 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1528 i, j, media_type_string(type));
1529 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1530 ist->nb_packets, ist->data_size);
1532 if (ist->decoding_needed) {
1533 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1534 ist->frames_decoded);
1535 if (type == AVMEDIA_TYPE_AUDIO)
1536 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1537 av_log(NULL, AV_LOG_VERBOSE, "; ");
1540 av_log(NULL, AV_LOG_VERBOSE, "\n");
1543 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1544 total_packets, total_size);
1547 for (i = 0; i < nb_output_files; i++) {
1548 OutputFile *of = output_files[i];
1549 uint64_t total_packets = 0, total_size = 0;
1551 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1552 i, of->ctx->filename);
1554 for (j = 0; j < of->ctx->nb_streams; j++) {
1555 OutputStream *ost = output_streams[of->ost_index + j];
1556 enum AVMediaType type = ost->enc_ctx->codec_type;
1558 total_size += ost->data_size;
1559 total_packets += ost->packets_written;
1561 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1562 i, j, media_type_string(type));
1563 if (ost->encoding_needed) {
1564 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1565 ost->frames_encoded);
1566 if (type == AVMEDIA_TYPE_AUDIO)
1567 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1568 av_log(NULL, AV_LOG_VERBOSE, "; ");
1571 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1572 ost->packets_written, ost->data_size);
1574 av_log(NULL, AV_LOG_VERBOSE, "\n");
1577 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1578 total_packets, total_size);
1580 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1581 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1583 av_log(NULL, AV_LOG_WARNING, "\n");
1585 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic (or final) progress line: frame count, fps, quantizer,
 * optional qp histogram and PSNR, size, time, bitrate, dup/drop counts and
 * speed.  Also mirrors the same data in machine-readable key=value form
 * into buf_script for the -progress output.  Rate-limited to one update
 * per 500ms unless is_last_report is set. */
1590 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1593 AVBPrint buf_script;
1595 AVFormatContext *oc;
1597 AVCodecContext *enc;
1598 int frame_number, vid, i;
1601 int64_t pts = INT64_MIN + 1;
1602 static int64_t last_time = -1;
1603 static int qp_histogram[52];
1604 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress output is wanted */
1608 if (!print_stats && !is_last_report && !progress_avio)
1611 if (!is_last_report) {
1612 if (last_time == -1) {
1613 last_time = cur_time;
/* throttle: at most one report every 500ms */
1616 if ((cur_time - last_time) < 500000)
1618 last_time = cur_time;
1621 t = (cur_time-timer_start) / 1000000.0;
1624 oc = output_files[0]->ctx;
1626 total_size = avio_size(oc->pb);
1627 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1628 total_size = avio_tell(oc->pb);
1632 av_bprint_init(&buf_script, 0, 1);
1633 for (i = 0; i < nb_output_streams; i++) {
1635 ost = output_streams[i];
1637 if (!ost->stream_copy)
1638 q = ost->quality / (float) FF_QP2LAMBDA;
/* a second/subsequent video stream only contributes its quantizer */
1640 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1641 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1642 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1643 ost->file_index, ost->index, q);
1645 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1648 frame_number = ost->frame_number;
1649 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" selects 1 decimal place for low fps, 0 otherwise */
1650 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1651 frame_number, fps < 9.95, fps, q);
1652 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1653 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1654 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1655 ost->file_index, ost->index, q);
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram (enabled elsewhere): one hex digit per qp bucket */
1661 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1663 for (j = 0; j < 32; j++)
1664 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1667 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1669 double error, error_sum = 0;
1670 double scale, scale_sum = 0;
1672 char type[3] = { 'Y','U','V' };
1673 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1674 for (j = 0; j < 3; j++) {
/* final report: accumulated error over all frames; otherwise per-frame */
1675 if (is_last_report) {
1676 error = enc->error[j];
1677 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1679 error = ost->error[j];
1680 scale = enc->width * enc->height * 255.0 * 255.0;
1686 p = psnr(error / scale);
1687 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1688 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1689 ost->file_index, ost->index, type[j] | 32, p);
1691 p = psnr(error_sum / scale_sum);
1692 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1693 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1694 ost->file_index, ost->index, p);
1698 /* compute min output value */
1699 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1700 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1701 ost->st->time_base, AV_TIME_BASE_Q));
1703 nb_frames_drop += ost->last_dropped;
1706 secs = FFABS(pts) / AV_TIME_BASE;
1707 us = FFABS(pts) % AV_TIME_BASE;
1713 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1714 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1716 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1718 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1719 "size=%8.0fkB time=", total_size / 1024.0);
1721 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1722 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1723 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1724 (100 * us) / AV_TIME_BASE);
1727 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1728 av_bprintf(&buf_script, "bitrate=N/A\n");
1730 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1731 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1734 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1735 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1736 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1737 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1738 hours, mins, secs, us);
1740 if (nb_frames_dup || nb_frames_drop)
1741 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1742 nb_frames_dup, nb_frames_drop);
1743 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1744 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1747 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1748 av_bprintf(&buf_script, "speed=N/A\n");
1750 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1751 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1754 if (print_stats || is_last_report) {
/* overwrite the same console line until the final report */
1755 const char end = is_last_report ? '\n' : '\r';
1756 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1757 fprintf(stderr, "%s %c", buf, end);
1759 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1764 if (progress_avio) {
1765 av_bprintf(&buf_script, "progress=%s\n",
1766 is_last_report ? "end" : "continue");
1767 avio_write(progress_avio, buf_script.str,
1768 FFMIN(buf_script.len, buf_script.size - 1));
1769 avio_flush(progress_avio);
1770 av_bprint_finalize(&buf_script, NULL);
1771 if (is_last_report) {
1772 if ((ret = avio_closep(&progress_avio)) < 0)
1773 av_log(NULL, AV_LOG_ERROR,
1774 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1779 print_final_stats(total_size);
/* Send a NULL (drain) frame to every active audio/video encoder and pump
 * out all remaining packets, muxing them and writing two-pass stats until
 * each encoder signals EOF. */
1782 static void flush_encoders(void)
1786 for (i = 0; i < nb_output_streams; i++) {
1787 OutputStream *ost = output_streams[i];
1788 AVCodecContext *enc = ost->enc_ctx;
1789 OutputFile *of = output_files[ost->file_index];
1790 int stop_encoding = 0;
1792 if (!ost->encoding_needed)
/* raw (frame_size <= 1) audio encoders buffer nothing; skip them */
1795 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1797 #if FF_API_LAVF_FMT_RAWPICTURE
1798 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1802 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* enter drain mode: NULL frame means no more input */
1805 avcodec_send_frame(enc, NULL);
1808 const char *desc = NULL;
1810 switch (enc->codec_type) {
1811 case AVMEDIA_TYPE_AUDIO:
1814 case AVMEDIA_TYPE_VIDEO:
1824 av_init_packet(&pkt);
1828 update_benchmark(NULL);
1829 ret = avcodec_receive_packet(enc, &pkt);
1830 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1831 if (ret < 0 && ret != AVERROR_EOF) {
1832 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* write two-pass log data even while draining */
1837 if (ost->logfile && enc->stats_out) {
1838 fprintf(ost->logfile, "%s", enc->stats_out);
1840 if (ret == AVERROR_EOF) {
1844 if (ost->finished & MUXER_FINISHED) {
1845 av_packet_unref(&pkt);
1848 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1849 pkt_size = pkt.size;
1850 output_packet(of, &pkt, ost);
1851 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1852 do_video_stats(ost, pkt_size);
1863 * Check whether a packet from ist should be written into ost at this time
/* Return whether a packet from ist may be written to ost right now:
 * the stream indices must match and, when an output start time is set,
 * the input must have reached it. */
1865 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1867 OutputFile *of = output_files[ost->file_index];
1868 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1870 if (ost->source_index != ist_index)
1876 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one packet from input to output without re-encoding: apply
 * start-time / recording-time constraints, rescale timestamps into the
 * output stream time base, optionally run the parser to fix up the
 * bitstream for a few codecs, then hand the packet to the muxer. */
1882 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1884 OutputFile *of = output_files[ost->file_index];
1885 InputFile *f = input_files [ist->file_index];
1886 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1887 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1891 av_init_packet(&opkt);
/* skip leading non-keyframes unless the user asked to keep them */
1893 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1894 !ost->copy_initial_nonkeyframes)
1897 if (!ost->frame_number && !ost->copy_prior_start) {
1898 int64_t comp_start = start_time;
1899 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1900 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1901 if (pkt->pts == AV_NOPTS_VALUE ?
1902 ist->pts < comp_start :
1903 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop this stream once the output recording duration is reached */
1907 if (of->recording_time != INT64_MAX &&
1908 ist->pts >= of->recording_time + start_time) {
1909 close_output_stream(ost);
1913 if (f->recording_time != INT64_MAX) {
1914 start_time = f->ctx->start_time;
1915 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1916 start_time += f->start_time;
1917 if (ist->pts >= f->recording_time + start_time) {
1918 close_output_stream(ost);
1923 /* force the input stream PTS */
1924 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1927 if (pkt->pts != AV_NOPTS_VALUE)
1928 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1930 opkt.pts = AV_NOPTS_VALUE;
1932 if (pkt->dts == AV_NOPTS_VALUE)
1933 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1935 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1936 opkt.dts -= ost_tb_start_time;
/* audio: derive smooth timestamps from the frame duration in samples */
1938 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1939 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1941 duration = ist->dec_ctx->frame_size;
1942 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1943 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1944 ost->st->time_base) - ost_tb_start_time;
1947 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1948 opkt.flags = pkt->flags;
1949 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1950 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1951 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1952 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1953 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1955 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1956 &opkt.data, &opkt.size,
1957 pkt->data, pkt->size,
1958 pkt->flags & AV_PKT_FLAG_KEY);
1960 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser allocated a new buffer: give the packet ownership of it */
1965 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1970 opkt.data = pkt->data;
1971 opkt.size = pkt->size;
1973 av_copy_packet_side_data(&opkt, pkt);
1975 #if FF_API_LAVF_FMT_RAWPICTURE
1976 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1977 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1978 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1979 /* store AVPicture in AVPacket, as expected by the output format */
1980 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1982 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1986 opkt.data = (uint8_t *)&pict;
1987 opkt.size = sizeof(AVPicture);
1988 opkt.flags |= AV_PKT_FLAG_KEY;
1992 output_packet(of, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for
 * its channel count (up to ist->guess_layout_max channels) and warn the
 * user.  Returns nonzero on success (implied by the visible fallthrough);
 * failure paths are in elided lines. */
1995 int guess_input_channel_layout(InputStream *ist)
1997 AVCodecContext *dec = ist->dec_ctx;
1999 if (!dec->channel_layout) {
2000 char layout_name[256];
2002 if (dec->channels > ist->guess_layout_max)
2004 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2005 if (!dec->channel_layout)
2007 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2008 dec->channels, dec->channel_layout);
2009 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2010 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Book-keep decode success/failure counts and, when -xerror is set,
 * abort on decode errors or on corrupt decoded frames. */
2015 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2017 if (*got_output || ret<0)
2018 decode_error_stat[ret<0] ++;
2020 if (ret < 0 && exit_on_error)
2023 if (exit_on_error && *got_output && ist) {
2024 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2025 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2031 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2032 // There is the following difference: if you got a frame, you must call
2033 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2034 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin wrapper over the send/receive decode API: submit pkt (may be NULL
 * to request more output), then try to receive one frame; *got_frame is
 * set by elided lines based on the receive result. */
2035 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2042 ret = avcodec_send_packet(avctx, pkt);
2043 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2044 // decoded frames with avcodec_receive_frame() until done.
2045 if (ret < 0 && ret != AVERROR_EOF)
2049 ret = avcodec_receive_frame(avctx, frame);
2050 if (ret < 0 && ret != AVERROR(EAGAIN))
/* Decode one audio packet: run the decoder, validate the sample rate,
 * advance next_pts/next_dts by the decoded duration, reconfigure the
 * filtergraphs when the sample format/rate/channels change, compute the
 * frame pts, and push the frame into every attached filter input. */
2058 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2060 AVFrame *decoded_frame, *f;
2061 AVCodecContext *avctx = ist->dec_ctx;
2062 int i, ret, err = 0, resample_changed;
2063 AVRational decoded_frame_tb;
/* lazily allocate the reusable decode and filter frames */
2065 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2066 return AVERROR(ENOMEM);
2067 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2068 return AVERROR(ENOMEM);
2069 decoded_frame = ist->decoded_frame;
2071 update_benchmark(NULL);
2072 ret = decode(avctx, decoded_frame, got_output, pkt);
2073 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2075 if (ret >= 0 && avctx->sample_rate <= 0) {
2076 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2077 ret = AVERROR_INVALIDDATA;
2080 if (ret != AVERROR_EOF)
2081 check_decode_result(ist, got_output, ret);
2083 if (!*got_output || ret < 0)
2086 ist->samples_decoded += decoded_frame->nb_samples;
2087 ist->frames_decoded++;
2090 /* increment next_dts to use for the case where the input stream does not
2091 have timestamps or there are multiple frames in the packet */
2092 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2094 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* detect any change in audio parameters that requires filter reinit */
2098 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2099 ist->resample_channels != avctx->channels ||
2100 ist->resample_channel_layout != decoded_frame->channel_layout ||
2101 ist->resample_sample_rate != decoded_frame->sample_rate;
2102 if (resample_changed) {
2103 char layout1[64], layout2[64];
2105 if (!guess_input_channel_layout(ist)) {
2106 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2107 "layout for Input Stream #%d.%d\n", ist->file_index,
2111 decoded_frame->channel_layout = avctx->channel_layout;
2113 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2114 ist->resample_channel_layout);
2115 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2116 decoded_frame->channel_layout);
2118 av_log(NULL, AV_LOG_INFO,
2119 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2120 ist->file_index, ist->st->index,
2121 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2122 ist->resample_channels, layout1,
2123 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2124 avctx->channels, layout2);
2126 ist->resample_sample_fmt = decoded_frame->format;
2127 ist->resample_sample_rate = decoded_frame->sample_rate;
2128 ist->resample_channel_layout = decoded_frame->channel_layout;
2129 ist->resample_channels = avctx->channels;
2131 for (i = 0; i < nb_filtergraphs; i++)
2132 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2133 FilterGraph *fg = filtergraphs[i];
2134 if (configure_filtergraph(fg) < 0) {
2135 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
/* choose a pts source: frame pts, else packet pts, else stream dts */
2141 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2142 decoded_frame_tb = ist->st->time_base;
2143 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2144 decoded_frame->pts = pkt->pts;
2145 decoded_frame_tb = ist->st->time_base;
2147 decoded_frame->pts = ist->dts;
2148 decoded_frame_tb = AV_TIME_BASE_Q;
2150 if (decoded_frame->pts != AV_NOPTS_VALUE)
2151 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2152 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2153 (AVRational){1, avctx->sample_rate});
2154 ist->nb_samples = decoded_frame->nb_samples;
/* push into each filter; all but the last input get a ref'd copy */
2155 for (i = 0; i < ist->nb_filters; i++) {
2156 if (i < ist->nb_filters - 1) {
2157 f = ist->filter_frame;
2158 err = av_frame_ref(f, decoded_frame);
2163 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2164 AV_BUFFERSRC_FLAG_PUSH);
2165 if (err == AVERROR_EOF)
2166 err = 0; /* ignore */
2170 decoded_frame->pts = AV_NOPTS_VALUE;
2172 av_frame_unref(ist->filter_frame);
2173 av_frame_unref(decoded_frame);
2174 return err < 0 ? err : ret;
/* Decode one video packet (or drain on EOF): run the decoder, fix up
 * video_delay for H.264 streams with missing parser info, recover a
 * best-effort timestamp (falling back to buffered dts values at EOF),
 * reconfigure filtergraphs when the frame size/format changes, and push
 * the decoded frame into every attached filter input. */
2177 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2179 AVFrame *decoded_frame, *f;
2180 int i, ret = 0, err = 0, resample_changed;
2181 int64_t best_effort_timestamp;
2182 int64_t dts = AV_NOPTS_VALUE;
2183 AVRational *frame_sample_aspect;
2186 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2187 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2189 if (!eof && pkt && pkt->size == 0)
2192 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2193 return AVERROR(ENOMEM);
2194 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2195 return AVERROR(ENOMEM);
2196 decoded_frame = ist->decoded_frame;
2197 if (ist->dts != AV_NOPTS_VALUE)
2198 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2201 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2204 // The old code used to set dts on the drain packet, which does not work
2205 // with the new API anymore.
/* remember the dts so EOF frames without timestamps can reuse it */
2207 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2209 return AVERROR(ENOMEM);
2210 ist->dts_buffer = new;
2211 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2214 update_benchmark(NULL);
2215 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2216 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2218 // The following line may be required in some cases where there is no parser
2219 // or the parser does not has_b_frames correctly
2220 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2221 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2222 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2224 av_log(ist->dec_ctx, AV_LOG_WARNING,
2225 "video_delay is larger in decoder than demuxer %d > %d.\n"
2226 "If you want to help, upload a sample "
2227 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2228 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2229 ist->dec_ctx->has_b_frames,
2230 ist->st->codecpar->video_delay);
2233 if (ret != AVERROR_EOF)
2234 check_decode_result(ist, got_output, ret);
/* log (at debug level) decoder/frame parameter mismatches */
2236 if (*got_output && ret >= 0) {
2237 if (ist->dec_ctx->width != decoded_frame->width ||
2238 ist->dec_ctx->height != decoded_frame->height ||
2239 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2240 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2241 decoded_frame->width,
2242 decoded_frame->height,
2243 decoded_frame->format,
2244 ist->dec_ctx->width,
2245 ist->dec_ctx->height,
2246 ist->dec_ctx->pix_fmt);
2250 if (!*got_output || ret < 0)
2253 if(ist->top_field_first>=0)
2254 decoded_frame->top_field_first = ist->top_field_first;
2256 ist->frames_decoded++;
/* copy hw-decoded surface data back to system memory when needed */
2258 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2259 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2263 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2265 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
/* at EOF with no timestamp, fall back to the oldest buffered dts */
2267 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2268 best_effort_timestamp = ist->dts_buffer[0];
2270 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2271 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2272 ist->nb_dts_buffer--;
2275 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2276 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2278 if (ts != AV_NOPTS_VALUE)
2279 ist->next_pts = ist->pts = ts;
2283 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2284 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2285 ist->st->index, av_ts2str(decoded_frame->pts),
2286 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2287 best_effort_timestamp,
2288 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2289 decoded_frame->key_frame, decoded_frame->pict_type,
2290 ist->st->time_base.num, ist->st->time_base.den);
2293 if (ist->st->sample_aspect_ratio.num)
2294 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect size/pix_fmt changes that require filtergraph reinit */
2296 resample_changed = ist->resample_width != decoded_frame->width ||
2297 ist->resample_height != decoded_frame->height ||
2298 ist->resample_pix_fmt != decoded_frame->format;
2299 if (resample_changed) {
2300 av_log(NULL, AV_LOG_INFO,
2301 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2302 ist->file_index, ist->st->index,
2303 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2304 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2306 ist->resample_width = decoded_frame->width;
2307 ist->resample_height = decoded_frame->height;
2308 ist->resample_pix_fmt = decoded_frame->format;
2310 for (i = 0; i < nb_filtergraphs; i++) {
2311 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2312 configure_filtergraph(filtergraphs[i]) < 0) {
2313 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2319 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* push into each filter; all but the last input get a ref'd copy */
2320 for (i = 0; i < ist->nb_filters; i++) {
2321 if (!frame_sample_aspect->num)
2322 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2324 if (i < ist->nb_filters - 1) {
2325 f = ist->filter_frame;
2326 err = av_frame_ref(f, decoded_frame);
2331 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2332 if (err == AVERROR_EOF) {
2333 err = 0; /* ignore */
2334 } else if (err < 0) {
2335 av_log(NULL, AV_LOG_FATAL,
2336 "Failed to inject frame into filter network: %s\n", av_err2str(err));
2342 av_frame_unref(ist->filter_frame);
2343 av_frame_unref(decoded_frame);
2344 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix the previous subtitle's display
 * duration (-fix_sub_duration), feed the sub2video machinery, and hand the
 * decoded AVSubtitle to every output stream that encodes subtitles from this
 * input. Returns the decoder's error code path (elided lines — see notes).
 * NOTE(review): this extract embeds original source line numbers and elides
 * several lines (braces, declarations, early returns); gaps are flagged. */
2347 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2349     AVSubtitle subtitle;
2350     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2351                                           &subtitle, got_output, pkt);
/* record/report the decode result (exit-on-error policy lives in the helper) */
2353     check_decode_result(NULL, got_output, ret);
/* decode failed or produced nothing — on EOF flush buffered sub2video state;
 * presumably the elided lines return from here (TODO confirm in full source) */
2355     if (ret < 0 || !*got_output) {
2357             sub2video_flush(ist);
/* -fix_sub_duration: clamp the previous subtitle so it ends when this one
 * starts; output is delayed by one subtitle via the FFSWAPs below */
2361     if (ist->fix_sub_duration) {
2363         if (ist->prev_sub.got_output) {
2364             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2365                              1000, AV_TIME_BASE);
2366             if (end < ist->prev_sub.subtitle.end_display_time) {
2367                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2368                        "Subtitle duration reduced from %d to %d%s\n",
2369                        ist->prev_sub.subtitle.end_display_time, end,
2370                        end <= 0 ? ", dropping it" : "");
2371                 ist->prev_sub.subtitle.end_display_time = end;
/* swap current decode result with the buffered previous one */
2374         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2375         FFSWAP(int,        ret,         ist->prev_sub.ret);
2376         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2384     sub2video_update(ist, &subtitle);
/* empty subtitle: nothing to encode (elided lines presumably skip to cleanup) */
2386     if (!subtitle.num_rects)
2389     ist->frames_decoded++;
/* dispatch to every subtitle-encoding output stream fed by this input */
2391     for (i = 0; i < nb_output_streams; i++) {
2392         OutputStream *ost = output_streams[i];
2394         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2395             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2398         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
/* the decoder allocated rects/data inside subtitle — always release */
2402     avsubtitle_free(&subtitle);
/* Signal end-of-stream to every buffersrc fed by this input stream by
 * pushing a NULL frame into each attached filter input.
 * NOTE(review): the declarations and the error-propagation/return lines are
 * elided in this extract; presumably a failing av_buffersrc_add_frame() call
 * makes the function return that error — confirm against the full source. */
2406 static int send_filter_eof(InputStream *ist)
2409     for (i = 0; i < ist->nb_filters; i++) {
2410         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2417 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the input stream's dts/pts
 * bookkeeping, runs the type-specific decoder when decoding is needed
 * (looping while the decoder keeps producing output on EOF), sends filter
 * EOF once draining finishes, and otherwise advances timestamps and
 * stream-copies the packet to matching outputs. Returns !eof_reached.
 * NOTE(review): embedded numbers are original source lines; many lines
 * (declarations, braces, break/continue, error paths) are elided here. */
2418 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2422     int eof_reached = 0;
/* first packet for this stream: seed dts from avg_frame_rate / B-frame delay */
2425     if (!ist->saw_first_ts) {
2426         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2428         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2429             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2430             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2432         ist->saw_first_ts = 1;
2435     if (ist->next_dts == AV_NOPTS_VALUE)
2436         ist->next_dts = ist->dts;
2437     if (ist->next_pts == AV_NOPTS_VALUE)
2438         ist->next_pts = ist->pts;
2442     av_init_packet(&avpkt);
/* sync internal timestamps (in AV_TIME_BASE units) to the packet's dts */
2449     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2450         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2451         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2452             ist->next_pts = ist->pts = ist->dts;
2455     // while we have more to decode or while the decoder did output something on EOF
2456     while (ist->decoding_needed) {
2460         ist->pts = ist->next_pts;
2461         ist->dts = ist->next_dts;
2463         switch (ist->dec_ctx->codec_type) {
2464         case AVMEDIA_TYPE_AUDIO:
2465             ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2467         case AVMEDIA_TYPE_VIDEO:
2468             ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
/* estimate the frame duration: packet duration first, then codec framerate
 * scaled by the parser's repeat_pict / ticks_per_frame */
2469             if (!repeating || !pkt || got_output) {
2470                 if (pkt && pkt->duration) {
2471                     duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2472                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2473                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2474                     duration = ((int64_t)AV_TIME_BASE *
2475                                     ist->dec_ctx->framerate.den * ticks) /
2476                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2479             if(ist->dts != AV_NOPTS_VALUE && duration) {
2480                 ist->next_dts += duration;
2482                 ist->next_dts = AV_NOPTS_VALUE;
2486                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2488         case AVMEDIA_TYPE_SUBTITLE:
2491             ret = transcode_subtitles(ist, &avpkt, &got_output);
2492             if (!pkt && ret >= 0)
/* EOF from the decoder means draining finished (elided lines presumably set
 * eof_reached and break — TODO confirm) */
2499         if (ret == AVERROR_EOF) {
2505             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2506                    ist->file_index, ist->st->index, av_err2str(ret));
2509         // Decoding might not terminate if we're draining the decoder, and
2510         // the decoder keeps returning an error.
2511         // This should probably be considered a libavcodec issue.
2512         // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2521         // During draining, we might get multiple output frames in this loop.
2522         // ffmpeg.c does not drain the filter chain on configuration changes,
2523         // which means if we send multiple frames at once to the filters, and
2524         // one of those frames changes configuration, the buffered frames will
2525         // be lost. This can upset certain FATE tests.
2526         // Decode only 1 frame per call on EOF to appease these FATE tests.
2527         // The ideal solution would be to rewrite decoding to use the new
2528         // decoding API in a better way.
2535     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2536     /* except when looping we need to flush but not to send an EOF */
2537     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2538         int ret = send_filter_eof(ist);
2540             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2545     /* handle stream copy */
2546     if (!ist->decoding_needed) {
2547         ist->dts = ist->next_dts;
2548         switch (ist->dec_ctx->codec_type) {
2549         case AVMEDIA_TYPE_AUDIO:
2550             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2551                              ist->dec_ctx->sample_rate;
2553         case AVMEDIA_TYPE_VIDEO:
2554             if (ist->framerate.num) {
2555                 // TODO: Remove work-around for c99-to-c89 issue 7
2556                 AVRational time_base_q = AV_TIME_BASE_Q;
2557                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2558                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2559             } else if (pkt->duration) {
2560                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2561             } else if(ist->dec_ctx->framerate.num != 0) {
2562                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2563                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2564                                   ist->dec_ctx->framerate.den * ticks) /
2565                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2569         ist->pts = ist->dts;
2570         ist->next_pts = ist->next_dts;
/* copy the packet to each output stream that stream-copies from this input */
2572     for (i = 0; pkt && i < nb_output_streams; i++) {
2573         OutputStream *ost = output_streams[i];
2575         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2578         do_streamcopy(ist, ost, pkt);
2581     return !eof_reached;
/* Build an SDP description covering every RTP output and either print it to
 * stdout or write it to -sdp_file. Waits until all output headers are
 * written (elided lines presumably return early otherwise).
 * NOTE(review): embedded numbers are original source lines; allocation
 * checks, frees and the closing braces are elided in this extract. */
2584 static void print_sdp(void)
2589     AVIOContext *sdp_pb;
2590     AVFormatContext **avc;
/* only emit the SDP once every muxer has written its header */
2592     for (i = 0; i < nb_output_files; i++) {
2593         if (!output_files[i]->header_written)
2597     avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* collect only the RTP outputs; j counts how many were gathered */
2600     for (i = 0, j = 0; i < nb_output_files; i++) {
2601         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2602             avc[j] = output_files[i]->ctx;
2610     av_sdp_create(avc, j, sdp, sizeof(sdp));
2612     if (!sdp_filename) {
2613         printf("SDP:\n%s\n", sdp);
2616         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2617             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2619             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2620             avio_closep(&sdp_pb);
/* free the filename so the SDP is only written once */
2621             av_freep(&sdp_filename);
2629 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2632 for (i = 0; hwaccels[i].name; i++)
2633 if (hwaccels[i].pix_fmt == pix_fmt)
2634 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format whose hwaccel can be
 * initialized (honouring the user's -hwaccel selection); falls through to
 * a software format otherwise (tail elided).
 * NOTE(review): embedded numbers are original source lines; the `continue`
 * statements, ret checks and closing braces are elided in this extract. */
2638 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2640     InputStream *ist = s->opaque;
2641     const enum AVPixelFormat *p;
/* candidate list is terminated by -1 (AV_PIX_FMT_NONE) */
2644     for (p = pix_fmts; *p != -1; p++) {
2645         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2646         const HWAccel *hwaccel;
/* software formats are skipped here (elided continue) */
2648         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2651         hwaccel = get_hwaccel(*p);
/* skip if no hwaccel exists, another one is already active, or the user
 * explicitly requested a different one */
2653             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2654             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2657         ret = hwaccel->init(s);
/* init failure is fatal only when this hwaccel was explicitly requested */
2659             if (ist->hwaccel_id == hwaccel->id) {
2660                 av_log(NULL, AV_LOG_FATAL,
2661                        "%s hwaccel requested for input stream #%d:%d, "
2662                        "but cannot be initialized.\n", hwaccel->name,
2663                        ist->file_index, ist->st->index);
2664                 return AV_PIX_FMT_NONE;
2669         if (ist->hw_frames_ctx) {
2670             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2671             if (!s->hw_frames_ctx)
2672                 return AV_PIX_FMT_NONE;
/* remember the accepted hwaccel and its pixel format for get_buffer() */
2675         ist->active_hwaccel_id = hwaccel->id;
2676         ist->hwaccel_pix_fmt = *p;
2683 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2685 InputStream *ist = s->opaque;
2687 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2688 return ist->hwaccel_get_buffer(s, frame, flags);
2690 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer2 callbacks, set stream-dependent decoder
 * options, open the codec, and reset next_pts/next_dts.
 * Returns 0 on success or a negative AVERROR; `error` receives a message.
 * NOTE(review): embedded numbers are original source lines; the "decoder
 * not found" check, some braces and the final return are elided here. */
2693 static int init_input_stream(int ist_index, char *error, int error_len)
2696     InputStream *ist = input_streams[ist_index];
2698     if (ist->decoding_needed) {
2699         AVCodec *codec = ist->dec;
/* elided: this error path fires when no decoder was found (codec == NULL) */
2701             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2702                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2703             return AVERROR(EINVAL);
/* hook ffmpeg.c's hwaccel-aware callbacks into the decoder context */
2706         ist->dec_ctx->opaque = ist;
2707         ist->dec_ctx->get_format = get_format;
2708         ist->dec_ctx->get_buffer2 = get_buffer;
2709         ist->dec_ctx->thread_safe_callbacks = 1;
2711         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2712         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2713             (ist->decoding_needed & DECODING_FOR_OST)) {
2714             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2715             if (ist->decoding_needed & DECODING_FOR_FILTER)
2716                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2719             av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2721         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2722          * audio, and video decoders such as cuvid or mediacodec */
2723         av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2725         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2726             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2727         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2728             if (ret == AVERROR_EXPERIMENTAL)
2729                 abort_codec_experimental(codec, 0);
2731             snprintf(error, error_len,
2732                      "Error while opening decoder for input stream "
2734                     ist->file_index, ist->st->index, av_err2str(ret));
/* any decoder option not consumed by avcodec_open2() is a user error */
2737         assert_avoptions(ist->decoder_opts);
2740     ist->next_pts = AV_NOPTS_VALUE;
2741     ist->next_dts = AV_NOPTS_VALUE;
2746 static InputStream *get_input_stream(OutputStream *ost)
2748 if (ost->source_index >= 0)
2749 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values: returns -1, 0 or 1, using the
 * sign-of-difference idiom instead of subtraction to avoid overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2758 /* open the muxer when all the streams are initialized */
/* Once every stream of this output file is initialized, write the muxer
 * header, dump the format, optionally trigger SDP emission, and flush
 * packets that were queued while the muxer was not yet open.
 * NOTE(review): embedded numbers are original source lines; declarations,
 * early returns, braces and the final return are elided in this extract. */
2759 static int check_init_output_file(OutputFile *of, int file_index)
/* bail out (elided return) while any stream is still uninitialized */
2763     for (i = 0; i < of->ctx->nb_streams; i++) {
2764         OutputStream *ost = output_streams[of->ost_index + i];
2765         if (!ost->initialized)
2769     of->ctx->interrupt_callback = int_cb;
2771     ret = avformat_write_header(of->ctx, &of->opts);
2773         av_log(NULL, AV_LOG_ERROR,
2774                "Could not write header for output file #%d "
2775                "(incorrect codec parameters ?): %s",
2776                file_index, av_err2str(ret));
2779     //assert_avoptions(of->opts);
2780     of->header_written = 1;
2782     av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2784     if (sdp_filename || want_sdp)
2787     /* flush the muxing queues */
2788     for (i = 0; i < of->ctx->nb_streams; i++) {
2789         OutputStream *ost = output_streams[of->ost_index + i];
/* drain every packet buffered before the header could be written */
2791         while (av_fifo_size(ost->muxing_queue)) {
2793             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2794             write_packet(of, &pkt, ost);
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through each filter in order,
 * then copy the last filter's output parameters/time base back onto the
 * stream. No-op when no bitstream filters are configured.
 * NOTE(review): embedded numbers are original source lines; ret checks,
 * returns and closing braces are elided in this extract. */
2801 static int init_output_bsfs(OutputStream *ost)
2806     if (!ost->nb_bitstream_filters)
2809     for (i = 0; i < ost->nb_bitstream_filters; i++) {
2810         ctx = ost->bsf_ctx[i];
/* filter i's input is filter i-1's output; the first takes stream params */
2812         ret = avcodec_parameters_copy(ctx->par_in,
2813                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2817         ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2819         ret = av_bsf_init(ctx);
2821             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2822                    ost->bsf_ctx[i]->filter->name);
/* the stream's muxed parameters become the output of the last filter */
2827     ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2828     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2832     ost->st->time_base = ctx->time_base_out;
/* Initialize an output stream in stream-copy mode: copy codec parameters,
 * extradata, side data, frame rate and time base from the input stream
 * without decoding/encoding, plus per-media-type fields.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): embedded numbers are original source lines; declarations,
 * ret checks, some braces, break statements and the final return are
 * elided in this extract. */
2837 static int init_output_stream_streamcopy(OutputStream *ost)
2839     OutputFile *of = output_files[ost->file_index];
2840     InputStream *ist = get_input_stream(ost);
2841     AVCodecParameters *par_dst = ost->st->codecpar;
2842     AVCodecParameters *par_src = ost->ref_par;
2845     uint64_t extra_size;
2847     av_assert0(ist && !ost->filter);
/* route input params through enc_ctx so -c:v copy honours codec options */
2849     avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2850     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2852         av_log(NULL, AV_LOG_FATAL,
2853                "Error setting up codec context options.\n");
2856     avcodec_parameters_from_context(par_src, ost->enc_ctx);
/* 64-bit sum guards against overflow before the INT_MAX check below */
2858     extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2860     if (extra_size > INT_MAX) {
2861         return AVERROR(EINVAL);
2864     /* if stream_copy is selected, no need to decode or encode */
2865     par_dst->codec_id = par_src->codec_id;
2866     par_dst->codec_type = par_src->codec_type;
/* keep the source tag only when the output container can represent it */
2868     if (!par_dst->codec_tag) {
2869         unsigned int codec_tag;
2870         if (!of->ctx->oformat->codec_tag ||
2871             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2872             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2873             par_dst->codec_tag = par_src->codec_tag;
2876     par_dst->bit_rate = par_src->bit_rate;
2877     par_dst->field_order = par_src->field_order;
2878     par_dst->chroma_location = par_src->chroma_location;
/* duplicate extradata with the zeroed padding libavcodec requires */
2880     if (par_src->extradata_size) {
2881         par_dst->extradata = av_mallocz(extra_size);
2882         if (!par_dst->extradata) {
2883             return AVERROR(ENOMEM);
2885         memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2886         par_dst->extradata_size = par_src->extradata_size;
2888     par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2889     par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2891     if (!ost->frame_rate.num)
2892         ost->frame_rate = ist->framerate;
2893     ost->st->avg_frame_rate = ost->frame_rate;
2895     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2899     // copy timebase while removing common factors
2900     ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
/* deep-copy stream-level side data, dropping the display matrix when the
 * user overrode rotation */
2902     if (ist->st->nb_side_data) {
2903         ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2904                                               sizeof(*ist->st->side_data));
2905         if (!ost->st->side_data)
2906             return AVERROR(ENOMEM);
2908         ost->st->nb_side_data = 0;
2909         for (i = 0; i < ist->st->nb_side_data; i++) {
2910             const AVPacketSideData *sd_src = &ist->st->side_data[i];
2911             AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2913             if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2916             sd_dst->data = av_malloc(sd_src->size);
2918                 return AVERROR(ENOMEM);
2919             memcpy(sd_dst->data, sd_src->data, sd_src->size);
2920             sd_dst->size = sd_src->size;
2921             sd_dst->type = sd_src->type;
2922             ost->st->nb_side_data++;
2926     ost->parser = av_parser_init(par_dst->codec_id);
2927     ost->parser_avctx = avcodec_alloc_context3(NULL);
2928     if (!ost->parser_avctx)
2929         return AVERROR(ENOMEM);
2931     switch (par_dst->codec_type) {
2932     case AVMEDIA_TYPE_AUDIO:
2933         if (audio_volume != 256) {
2934             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2937         par_dst->channel_layout = par_src->channel_layout;
2938         par_dst->sample_rate = par_src->sample_rate;
2939         par_dst->channels = par_src->channels;
2940         par_dst->frame_size = par_src->frame_size;
2941         par_dst->block_align = par_src->block_align;
2942         par_dst->initial_padding = par_src->initial_padding;
2943         par_dst->trailing_padding = par_src->trailing_padding;
2944         par_dst->profile = par_src->profile;
/* MP3/AC-3 block_align values are unreliable for remuxing — clear them */
2945         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2946             par_dst->block_align= 0;
2947         if(par_dst->codec_id == AV_CODEC_ID_AC3)
2948             par_dst->block_align= 0;
2950     case AVMEDIA_TYPE_VIDEO:
2951         par_dst->format = par_src->format;
2952         par_dst->color_space = par_src->color_space;
2953         par_dst->color_range = par_src->color_range;
2954         par_dst->color_primaries = par_src->color_primaries;
2955         par_dst->color_trc = par_src->color_trc;
2956         par_dst->width = par_src->width;
2957         par_dst->height = par_src->height;
2958         par_dst->video_delay = par_src->video_delay;
2959         par_dst->profile = par_src->profile;
/* SAR priority: -aspect override, then container-level SAR, then codec SAR */
2960         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2962                 av_mul_q(ost->frame_aspect_ratio,
2963                          (AVRational){ par_dst->height, par_dst->width });
2964             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2965                    "with stream copy may produce invalid files\n");
2967         else if (ist->st->sample_aspect_ratio.num)
2968             sar = ist->st->sample_aspect_ratio;
2970             sar = par_src->sample_aspect_ratio;
2971         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2972         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2973         ost->st->r_frame_rate = ist->st->r_frame_rate;
2975     case AVMEDIA_TYPE_SUBTITLE:
2976         par_dst->width = par_src->width;
2977         par_dst->height = par_src->height;
2979     case AVMEDIA_TYPE_UNKNOWN:
2980     case AVMEDIA_TYPE_DATA:
2981     case AVMEDIA_TYPE_ATTACHMENT:
/* Fully initialize one output stream: open its encoder (or run the
 * stream-copy initializer), copy encoder parameters and side data to the
 * muxer stream, init bitstream filters, and, once the stream is marked
 * initialized, try to open the containing output file.
 * Returns 0 on success or a negative AVERROR; `error` receives a message.
 * NOTE(review): embedded numbers are original source lines; declarations,
 * ret checks, some braces and the final return are elided in this extract. */
2990 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2994     if (ost->encoding_needed) {
2995         AVCodec *codec = ost->enc;
2996         AVCodecContext *dec = NULL;
2999         if ((ist = get_input_stream(ost)))
3001         if (dec && dec->subtitle_header) {
3002             /* ASS code assumes this buffer is null terminated so add extra byte. */
3003             ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3004             if (!ost->enc_ctx->subtitle_header)
3005                 return AVERROR(ENOMEM);
3006             memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3007             ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3009         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3010             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* default audio bitrate of 128k when the user set neither -b nor -ab */
3011         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3013             !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3014             !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3015             av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* propagate hw frames from the filtergraph output into the encoder */
3017         if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3018             ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3019             if (!ost->enc_ctx->hw_frames_ctx)
3020                 return AVERROR(ENOMEM);
3023         if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3024             if (ret == AVERROR_EXPERIMENTAL)
3025                 abort_codec_experimental(codec, 1);
3026             snprintf(error, error_len,
3027                      "Error while opening encoder for output stream #%d:%d - "
3028                      "maybe incorrect parameters such as bit_rate, rate, width or height",
3029                     ost->file_index, ost->index);
/* fixed-frame-size encoders need the buffersink to emit matching frames */
3032         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3033             !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3034             av_buffersink_set_frame_size(ost->filter->filter,
3035                                          ost->enc_ctx->frame_size);
3036         assert_avoptions(ost->encoder_opts);
3037         if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3038             av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3039                                          " It takes bits/s as argument, not kbits/s\n");
3041         ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3043             av_log(NULL, AV_LOG_FATAL,
3044                    "Error initializing the output stream codec context.\n");
3048          * FIXME: ost->st->codec should't be needed here anymore.
3050         ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
/* mirror the encoder's coded side data onto the muxer stream */
3054         if (ost->enc_ctx->nb_coded_side_data) {
3057             ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3058                                                   sizeof(*ost->st->side_data));
3059             if (!ost->st->side_data)
3060                 return AVERROR(ENOMEM);
3062             for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3063                 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3064                 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3066                 sd_dst->data = av_malloc(sd_src->size);
3068                     return AVERROR(ENOMEM);
3069                 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3070                 sd_dst->size = sd_src->size;
3071                 sd_dst->type = sd_src->type;
3072                 ost->st->nb_side_data++;
3076         // copy timebase while removing common factors
3077         ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3078         ost->st->codec->codec= ost->enc_ctx->codec;
3079     } else if (ost->stream_copy) {
3080         ret = init_output_stream_streamcopy(ost);
3085          * FIXME: will the codec context used by the parser during streamcopy
3086          * This should go away with the new parser API.
3088         ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3093     /* initialize bitstream filters for the output stream
3094      * needs to be done here, because the codec id for streamcopy is not
3095      * known until now */
3096     ret = init_output_bsfs(ost);
3100     ost->initialized = 1;
/* the muxer header is written once every stream of the file is ready */
3102     ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Parse the -force_key_frames specification (a comma-separated list of
 * timestamps and/or "chapters[+offset]") into a sorted array of pts values
 * in avctx->time_base units, stored on ost->forced_kf_pts/_count.
 * NOTE(review): embedded numbers are original source lines; declarations,
 * the comma-counting loop body, null checks and closing braces are elided
 * in this extract. */
3109 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3110                                     AVCodecContext *avctx)
3113     int n = 1, i, size, index = 0;
/* elided loop body presumably counts ',' separators to size the array */
3116     for (p = kf; *p; p++)
3120     pts = av_malloc_array(size, sizeof(*pts));
3122         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3127     for (i = 0; i < n; i++) {
3128         char *next = strchr(p, ',');
/* "chapters[+offset]": force a keyframe at each chapter start (+ offset) */
3133         if (!memcmp(p, "chapters", 8)) {
3135             AVFormatContext *avf = output_files[ost->file_index]->ctx;
3138             if (avf->nb_chapters > INT_MAX - size ||
3139                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3141                 av_log(NULL, AV_LOG_FATAL,
3142                        "Could not allocate forced key frames array.\n");
3145             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3146             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3148             for (j = 0; j < avf->nb_chapters; j++) {
3149                 AVChapter *c = avf->chapters[j];
3150                 av_assert1(index < size);
3151                 pts[index++] = av_rescale_q(c->start, c->time_base,
3152                                             avctx->time_base) + t;
/* plain timestamp entry */
3157             t = parse_time_or_die("force_key_frames", p, 1);
3158             av_assert1(index < size);
3159             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3166     av_assert0(index == size);
/* keep the list sorted so the encoder loop can scan it linearly */
3167     qsort(pts, size, sizeof(*pts), compare_int64);
3168     ost->forced_kf_count = size;
3169     ost->forced_kf_pts = pts;
/* Warn once per stream when a packet arrives for a stream that appeared
 * after the initial probe; nb_streams_warn tracks the highest index
 * already reported (the early `return` guarding repeats is elided here). */
3172 static void report_new_stream(int input_index, AVPacket *pkt)
3174     InputFile *file = input_files[input_index];
3175     AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this stream index — elided line presumably returns */
3177     if (pkt->stream_index < file->nb_streams_warn)
3179     av_log(file->ctx, AV_LOG_WARNING,
3180            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3181            av_get_media_type_string(st->codecpar->codec_type),
3182            input_index, pkt->stream_index,
3183            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3184     file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on an output stream to identify the
 * encoder that produced it ("Lavc... <name>"), unless the user already set
 * one or bitexact mode suppresses the library version string.
 * NOTE(review): embedded numbers are original source lines; early returns,
 * `if (e)` braces and the closing brace are elided in this extract. */
3187 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3189     AVDictionaryEntry *e;
3191     uint8_t *encoder_string;
3192     int encoder_string_len;
3193     int format_flags = 0;
3194     int codec_flags = 0;
/* user-provided encoder tag wins — elided line presumably returns */
3196     if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* evaluate -fflags / codec flags option strings to detect +bitexact */
3199     e = av_dict_get(of->opts, "fflags", NULL, 0);
3201         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3204         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3206     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3208         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3211         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2: one space separator and the terminating NUL */
3214     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3215     encoder_string = av_mallocz(encoder_string_len);
3216     if (!encoder_string)
/* bitexact output omits the version string for reproducible files */
3219     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3220         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3222         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3223     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dictionary takes ownership of encoder_string */
3224     av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3225                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3228 static int transcode_init(void)
3230 int ret = 0, i, j, k;
3231 AVFormatContext *oc;
3234 char error[1024] = {0};
3236 for (i = 0; i < nb_filtergraphs; i++) {
3237 FilterGraph *fg = filtergraphs[i];
3238 for (j = 0; j < fg->nb_outputs; j++) {
3239 OutputFilter *ofilter = fg->outputs[j];
3240 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3242 if (fg->nb_inputs != 1)
3244 for (k = nb_input_streams-1; k >= 0 ; k--)
3245 if (fg->inputs[0]->ist == input_streams[k])
3247 ofilter->ost->source_index = k;
3251 /* init framerate emulation */
3252 for (i = 0; i < nb_input_files; i++) {
3253 InputFile *ifile = input_files[i];
3254 if (ifile->rate_emu)
3255 for (j = 0; j < ifile->nb_streams; j++)
3256 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3259 /* for each output stream, we compute the right encoding parameters */
3260 for (i = 0; i < nb_output_streams; i++) {
3261 ost = output_streams[i];
3262 oc = output_files[ost->file_index]->ctx;
3263 ist = get_input_stream(ost);
3265 if (ost->attachment_filename)
3269 ost->st->disposition = ist->st->disposition;
3271 for (j=0; j<oc->nb_streams; j++) {
3272 AVStream *st = oc->streams[j];
3273 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3276 if (j == oc->nb_streams)
3277 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3278 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3279 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3282 if (!ost->stream_copy) {
3283 AVCodecContext *enc_ctx = ost->enc_ctx;
3284 AVCodecContext *dec_ctx = NULL;
3286 set_encoder_id(output_files[ost->file_index], ost);
3289 dec_ctx = ist->dec_ctx;
3291 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3295 if (qsv_transcode_init(ost))
3300 if (cuvid_transcode_init(ost))
3304 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3305 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3306 filtergraph_is_simple(ost->filter->graph)) {
3307 FilterGraph *fg = ost->filter->graph;
3308 if (configure_filtergraph(fg)) {
3309 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3314 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3315 if (!ost->frame_rate.num)
3316 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3317 if (ist && !ost->frame_rate.num)
3318 ost->frame_rate = ist->framerate;
3319 if (ist && !ost->frame_rate.num)
3320 ost->frame_rate = ist->st->r_frame_rate;
3321 if (ist && !ost->frame_rate.num) {
3322 ost->frame_rate = (AVRational){25, 1};
3323 av_log(NULL, AV_LOG_WARNING,
3325 "about the input framerate is available. Falling "
3326 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3327 "if you want a different framerate.\n",
3328 ost->file_index, ost->index);
3330 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3331 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3332 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3333 ost->frame_rate = ost->enc->supported_framerates[idx];
3335 // reduce frame rate for mpeg4 to be within the spec limits
3336 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3337 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3338 ost->frame_rate.num, ost->frame_rate.den, 65535);
3342 switch (enc_ctx->codec_type) {
3343 case AVMEDIA_TYPE_AUDIO:
3344 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3346 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3347 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3348 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3349 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3350 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3351 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3353 case AVMEDIA_TYPE_VIDEO:
3354 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3355 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3356 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3357 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3358 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3359 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3360 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3362 for (j = 0; j < ost->forced_kf_count; j++)
3363 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3365 enc_ctx->time_base);
3367 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3368 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3369 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3370 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3371 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3372 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3373 if (!strncmp(ost->enc->name, "libx264", 7) &&
3374 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3375 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3376 av_log(NULL, AV_LOG_WARNING,
3377 "No pixel format specified, %s for H.264 encoding chosen.\n"
3378 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3379 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3380 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3381 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3382 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3383 av_log(NULL, AV_LOG_WARNING,
3384 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3385 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3386 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3387 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3389 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3390 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3392 ost->st->avg_frame_rate = ost->frame_rate;
3395 enc_ctx->width != dec_ctx->width ||
3396 enc_ctx->height != dec_ctx->height ||
3397 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3398 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3401 if (ost->forced_keyframes) {
3402 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3403 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3404 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3406 av_log(NULL, AV_LOG_ERROR,
3407 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3410 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3411 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3412 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3413 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3415 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416 // parse it only for static kf timings
3417 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3418 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3422 case AVMEDIA_TYPE_SUBTITLE:
3423 enc_ctx->time_base = (AVRational){1, 1000};
3424 if (!enc_ctx->width) {
3425 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3429 case AVMEDIA_TYPE_DATA:
3437 if (ost->disposition) {
3438 static const AVOption opts[] = {
3439 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3440 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3441 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3442 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3443 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3444 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3445 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3446 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3447 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3448 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3449 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3450 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3451 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3452 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3455 static const AVClass class = {
3457 .item_name = av_default_item_name,
3459 .version = LIBAVUTIL_VERSION_INT,
3461 const AVClass *pclass = &class;
3463 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3469 /* init input streams */
3470 for (i = 0; i < nb_input_streams; i++)
3471 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3472 for (i = 0; i < nb_output_streams; i++) {
3473 ost = output_streams[i];
3474 avcodec_close(ost->enc_ctx);
3479 /* open each encoder */
3480 for (i = 0; i < nb_output_streams; i++) {
3481 ret = init_output_stream(output_streams[i], error, sizeof(error));
3486 /* discard unused programs */
3487 for (i = 0; i < nb_input_files; i++) {
3488 InputFile *ifile = input_files[i];
3489 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3490 AVProgram *p = ifile->ctx->programs[j];
3491 int discard = AVDISCARD_ALL;
3493 for (k = 0; k < p->nb_stream_indexes; k++)
3494 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3495 discard = AVDISCARD_DEFAULT;
3498 p->discard = discard;
3502 /* write headers for files with no streams */
3503 for (i = 0; i < nb_output_files; i++) {
3504 oc = output_files[i]->ctx;
3505 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3506 ret = check_init_output_file(output_files[i], i);
3513 /* dump the stream mapping */
3514 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3515 for (i = 0; i < nb_input_streams; i++) {
3516 ist = input_streams[i];
3518 for (j = 0; j < ist->nb_filters; j++) {
3519 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3520 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3521 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3522 ist->filters[j]->name);
3523 if (nb_filtergraphs > 1)
3524 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3525 av_log(NULL, AV_LOG_INFO, "\n");
3530 for (i = 0; i < nb_output_streams; i++) {
3531 ost = output_streams[i];
3533 if (ost->attachment_filename) {
3534 /* an attached file */
3535 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3536 ost->attachment_filename, ost->file_index, ost->index);
3540 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3541 /* output from a complex graph */
3542 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3543 if (nb_filtergraphs > 1)
3544 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3546 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3547 ost->index, ost->enc ? ost->enc->name : "?");
3551 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3552 input_streams[ost->source_index]->file_index,
3553 input_streams[ost->source_index]->st->index,
3556 if (ost->sync_ist != input_streams[ost->source_index])
3557 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3558 ost->sync_ist->file_index,
3559 ost->sync_ist->st->index);
3560 if (ost->stream_copy)
3561 av_log(NULL, AV_LOG_INFO, " (copy)");
3563 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3564 const AVCodec *out_codec = ost->enc;
3565 const char *decoder_name = "?";
3566 const char *in_codec_name = "?";
3567 const char *encoder_name = "?";
3568 const char *out_codec_name = "?";
3569 const AVCodecDescriptor *desc;
3572 decoder_name = in_codec->name;
3573 desc = avcodec_descriptor_get(in_codec->id);
3575 in_codec_name = desc->name;
3576 if (!strcmp(decoder_name, in_codec_name))
3577 decoder_name = "native";
3581 encoder_name = out_codec->name;
3582 desc = avcodec_descriptor_get(out_codec->id);
3584 out_codec_name = desc->name;
3585 if (!strcmp(encoder_name, out_codec_name))
3586 encoder_name = "native";
3589 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3590 in_codec_name, decoder_name,
3591 out_codec_name, encoder_name);
3593 av_log(NULL, AV_LOG_INFO, "\n");
3597 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3601 transcode_init_done = 1;
3606 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3607 static int need_output(void)
3611 for (i = 0; i < nb_output_streams; i++) {
3612 OutputStream *ost = output_streams[i];
3613 OutputFile *of = output_files[ost->file_index];
3614 AVFormatContext *os = output_files[ost->file_index]->ctx;
3616 if (ost->finished ||
3617 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3619 if (ost->frame_number >= ost->max_frames) {
3621 for (j = 0; j < of->ctx->nb_streams; j++)
3622 close_output_stream(output_streams[of->ost_index + j]);
3633 * Select the output stream to process.
3635 * @return selected output stream, or NULL if none available
3637 static OutputStream *choose_output(void)
3640 int64_t opts_min = INT64_MAX;
3641 OutputStream *ost_min = NULL;
3643 for (i = 0; i < nb_output_streams; i++) {
3644 OutputStream *ost = output_streams[i];
3645 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3646 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3648 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3649 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3651 if (!ost->finished && opts < opts_min) {
3653 ost_min = ost->unavailable ? NULL : ost;
/* Enable or disable terminal echo on stdin; no-op where termios is absent. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
3671 static int check_keyboard_interaction(int64_t cur_time)
3674 static int64_t last_time;
3675 if (received_nb_signals)
3676 return AVERROR_EXIT;
3677 /* read_key() returns 0 on EOF */
3678 if(cur_time - last_time >= 100000 && !run_as_daemon){
3680 last_time = cur_time;
3684 return AVERROR_EXIT;
3685 if (key == '+') av_log_set_level(av_log_get_level()+10);
3686 if (key == '-') av_log_set_level(av_log_get_level()-10);
3687 if (key == 's') qp_hist ^= 1;
3690 do_hex_dump = do_pkt_dump = 0;
3691 } else if(do_pkt_dump){
3695 av_log_set_level(AV_LOG_DEBUG);
3697 if (key == 'c' || key == 'C'){
3698 char buf[4096], target[64], command[256], arg[256] = {0};
3701 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3704 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3709 fprintf(stderr, "\n");
3711 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3712 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3713 target, time, command, arg);
3714 for (i = 0; i < nb_filtergraphs; i++) {
3715 FilterGraph *fg = filtergraphs[i];
3718 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3719 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3720 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3721 } else if (key == 'c') {
3722 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3723 ret = AVERROR_PATCHWELCOME;
3725 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3727 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3732 av_log(NULL, AV_LOG_ERROR,
3733 "Parse error, at least 3 arguments were expected, "
3734 "only %d given in string '%s'\n", n, buf);
3737 if (key == 'd' || key == 'D'){
3740 debug = input_streams[0]->st->codec->debug<<1;
3741 if(!debug) debug = 1;
3742 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3749 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3754 fprintf(stderr, "\n");
3755 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3756 fprintf(stderr,"error parsing debug value\n");
3758 for(i=0;i<nb_input_streams;i++) {
3759 input_streams[i]->st->codec->debug = debug;
3761 for(i=0;i<nb_output_streams;i++) {
3762 OutputStream *ost = output_streams[i];
3763 ost->enc_ctx->debug = debug;
3765 if(debug) av_log_set_level(AV_LOG_DEBUG);
3766 fprintf(stderr,"debug=%d\n", debug);
3769 fprintf(stderr, "key function\n"
3770 "? show this help\n"
3771 "+ increase verbosity\n"
3772 "- decrease verbosity\n"
3773 "c Send command to first matching filter supporting it\n"
3774 "C Send/Queue command to all matching filters\n"
3775 "D cycle through available debug modes\n"
3776 "h dump packets/hex press to cycle through the 3 states\n"
3778 "s Show QP histogram\n"
#if HAVE_PTHREADS
/* Per-input-file demuxer thread: reads packets and feeds the message queue. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full: retry once in blocking mode and warn the user */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
#endif
#if HAVE_PTHREADS
/* Signal EOF to, drain, join and free every input demuxer thread. */
static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f || !f->in_thread_queue)
            continue;
        av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
        /* drain any packets still queued so the thread can exit */
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}
#endif
#if HAVE_PTHREADS
/* Spawn one demuxer thread per input file (only when there are several). */
static int init_input_threads(void)
{
    int i, ret;

    if (nb_input_files == 1)
        return 0;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];

        /* non-seekable inputs (except lavfi) must not block the reader */
        if (f->ctx->pb ? !f->ctx->pb->seekable :
            strcmp(f->ctx->iformat->name, "lavfi"))
            f->non_blocking = 1;
        ret = av_thread_message_queue_alloc(&f->in_thread_queue,
                                            f->thread_queue_size, sizeof(AVPacket));
        if (ret < 0)
            return ret;

        if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
            av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
            av_thread_message_queue_free(&f->in_thread_queue);
            return AVERROR(ret);
        }
    }
    return 0;
}
#endif
#if HAVE_PTHREADS
/* Fetch the next packet of an input file from its demuxer-thread queue. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        AV_THREAD_MESSAGE_NONBLOCK : 0);
}
#endif
3881 static int get_input_packet(InputFile *f, AVPacket *pkt)
3885 for (i = 0; i < f->nb_streams; i++) {
3886 InputStream *ist = input_streams[f->ist_index + i];
3887 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3888 int64_t now = av_gettime_relative() - ist->start;
3890 return AVERROR(EAGAIN);
3895 if (nb_input_files > 1)
3896 return get_input_packet_mt(f, pkt);
3898 return av_read_frame(f->ctx, pkt);
3901 static int got_eagain(void)
3904 for (i = 0; i < nb_output_streams; i++)
3905 if (output_streams[i]->unavailable)
3910 static void reset_eagain(void)
3913 for (i = 0; i < nb_input_files; i++)
3914 input_files[i]->eagain = 0;
3915 for (i = 0; i < nb_output_streams; i++)
3916 output_streams[i]->unavailable = 0;
3919 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3920 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3921 AVRational time_base)
3927 return tmp_time_base;
3930 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3933 return tmp_time_base;
3939 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3942 AVCodecContext *avctx;
3943 int i, ret, has_audio = 0;
3944 int64_t duration = 0;
3946 ret = av_seek_frame(is, -1, is->start_time, 0);
3950 for (i = 0; i < ifile->nb_streams; i++) {
3951 ist = input_streams[ifile->ist_index + i];
3952 avctx = ist->dec_ctx;
3955 if (ist->decoding_needed) {
3956 process_input_packet(ist, NULL, 1);
3957 avcodec_flush_buffers(avctx);
3960 /* duration is the length of the last frame in a stream
3961 * when audio stream is present we don't care about
3962 * last video frame length because it's not defined exactly */
3963 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3967 for (i = 0; i < ifile->nb_streams; i++) {
3968 ist = input_streams[ifile->ist_index + i];
3969 avctx = ist->dec_ctx;
3972 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3973 AVRational sample_rate = {1, avctx->sample_rate};
3975 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3979 if (ist->framerate.num) {
3980 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3981 } else if (ist->st->avg_frame_rate.num) {
3982 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3983 } else duration = 1;
3985 if (!ifile->duration)
3986 ifile->time_base = ist->st->time_base;
3987 /* the total duration of the stream, max_pts - min_pts is
3988 * the duration of the stream without the last frame */
3989 duration += ist->max_pts - ist->min_pts;
3990 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3994 if (ifile->loop > 0)
4002 * - 0 -- one packet was read and processed
4003 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4004 * this function should be called again
4005 * - AVERROR_EOF -- this function should not be called again
4007 static int process_input(int file_index)
4009 InputFile *ifile = input_files[file_index];
4010 AVFormatContext *is;
4018 ret = get_input_packet(ifile, &pkt);
4020 if (ret == AVERROR(EAGAIN)) {
4024 if (ret < 0 && ifile->loop) {
4025 if ((ret = seek_to_start(ifile, is)) < 0)
4027 ret = get_input_packet(ifile, &pkt);
4028 if (ret == AVERROR(EAGAIN)) {
4034 if (ret != AVERROR_EOF) {
4035 print_error(is->filename, ret);
4040 for (i = 0; i < ifile->nb_streams; i++) {
4041 ist = input_streams[ifile->ist_index + i];
4042 if (ist->decoding_needed) {
4043 ret = process_input_packet(ist, NULL, 0);
4048 /* mark all outputs that don't go through lavfi as finished */
4049 for (j = 0; j < nb_output_streams; j++) {
4050 OutputStream *ost = output_streams[j];
4052 if (ost->source_index == ifile->ist_index + i &&
4053 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4054 finish_output_stream(ost);
4058 ifile->eof_reached = 1;
4059 return AVERROR(EAGAIN);
4065 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4066 is->streams[pkt.stream_index]);
4068 /* the following test is needed in case new streams appear
4069 dynamically in stream : we ignore them */
4070 if (pkt.stream_index >= ifile->nb_streams) {
4071 report_new_stream(file_index, &pkt);
4072 goto discard_packet;
4075 ist = input_streams[ifile->ist_index + pkt.stream_index];
4077 ist->data_size += pkt.size;
4081 goto discard_packet;
4083 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4084 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4089 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4090 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4091 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4092 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4093 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4094 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4095 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4096 av_ts2str(input_files[ist->file_index]->ts_offset),
4097 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4100 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4101 int64_t stime, stime2;
4102 // Correcting starttime based on the enabled streams
4103 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4104 // so we instead do it here as part of discontinuity handling
4105 if ( ist->next_dts == AV_NOPTS_VALUE
4106 && ifile->ts_offset == -is->start_time
4107 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4108 int64_t new_start_time = INT64_MAX;
4109 for (i=0; i<is->nb_streams; i++) {
4110 AVStream *st = is->streams[i];
4111 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4113 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4115 if (new_start_time > is->start_time) {
4116 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4117 ifile->ts_offset = -new_start_time;
4121 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4122 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4123 ist->wrap_correction_done = 1;
4125 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4126 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4127 ist->wrap_correction_done = 0;
4129 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4130 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4131 ist->wrap_correction_done = 0;
4135 /* add the stream-global side data to the first packet */
4136 if (ist->nb_packets == 1) {
4137 if (ist->st->nb_side_data)
4138 av_packet_split_side_data(&pkt);
4139 for (i = 0; i < ist->st->nb_side_data; i++) {
4140 AVPacketSideData *src_sd = &ist->st->side_data[i];
4143 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4145 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4148 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4152 memcpy(dst_data, src_sd->data, src_sd->size);
4156 if (pkt.dts != AV_NOPTS_VALUE)
4157 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4158 if (pkt.pts != AV_NOPTS_VALUE)
4159 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4161 if (pkt.pts != AV_NOPTS_VALUE)
4162 pkt.pts *= ist->ts_scale;
4163 if (pkt.dts != AV_NOPTS_VALUE)
4164 pkt.dts *= ist->ts_scale;
4166 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4167 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4168 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4169 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4170 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4171 int64_t delta = pkt_dts - ifile->last_ts;
4172 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4173 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4174 ifile->ts_offset -= delta;
4175 av_log(NULL, AV_LOG_DEBUG,
4176 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4177 delta, ifile->ts_offset);
4178 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4179 if (pkt.pts != AV_NOPTS_VALUE)
4180 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4184 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4185 if (pkt.pts != AV_NOPTS_VALUE) {
4186 pkt.pts += duration;
4187 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4188 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4191 if (pkt.dts != AV_NOPTS_VALUE)
4192 pkt.dts += duration;
4194 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4195 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4196 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4197 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4199 int64_t delta = pkt_dts - ist->next_dts;
4200 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4201 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4202 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4203 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4204 ifile->ts_offset -= delta;
4205 av_log(NULL, AV_LOG_DEBUG,
4206 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4207 delta, ifile->ts_offset);
4208 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4209 if (pkt.pts != AV_NOPTS_VALUE)
4210 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4213 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4214 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4215 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4216 pkt.dts = AV_NOPTS_VALUE;
4218 if (pkt.pts != AV_NOPTS_VALUE){
4219 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4220 delta = pkt_pts - ist->next_dts;
4221 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4222 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4223 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4224 pkt.pts = AV_NOPTS_VALUE;
4230 if (pkt.dts != AV_NOPTS_VALUE)
4231 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4234 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4235 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4236 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4237 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4238 av_ts2str(input_files[ist->file_index]->ts_offset),
4239 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4242 sub2video_heartbeat(ist, pkt.pts);
4244 process_input_packet(ist, &pkt, 0);
4247 av_packet_unref(&pkt);
4253 * Perform a step of transcoding for the specified filter graph.
4255 * @param[in] graph filter graph to consider
4256 * @param[out] best_ist input stream where a frame would allow to continue
4257 * @return 0 for success, <0 for error
4259 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4262 int nb_requests, nb_requests_max = 0;
4263 InputFilter *ifilter;
4267 ret = avfilter_graph_request_oldest(graph->graph);
4269 return reap_filters(0);
4271 if (ret == AVERROR_EOF) {
4272 ret = reap_filters(1);
4273 for (i = 0; i < graph->nb_outputs; i++)
4274 close_output_stream(graph->outputs[i]->ost);
4277 if (ret != AVERROR(EAGAIN))
4280 for (i = 0; i < graph->nb_inputs; i++) {
4281 ifilter = graph->inputs[i];
4283 if (input_files[ist->file_index]->eagain ||
4284 input_files[ist->file_index]->eof_reached)
4286 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4287 if (nb_requests > nb_requests_max) {
4288 nb_requests_max = nb_requests;
4294 for (i = 0; i < graph->nb_outputs; i++)
4295 graph->outputs[i]->ost->unavailable = 1;
4301 * Run a single step of transcoding.
4303 * @return 0 for success, <0 for error
4305 static int transcode_step(void)
4311 ost = choose_output();
4318 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4323 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4328 av_assert0(ost->source_index >= 0);
4329 ist = input_streams[ost->source_index];
4332 ret = process_input(ist->file_index);
4333 if (ret == AVERROR(EAGAIN)) {
4334 if (input_files[ist->file_index]->eagain)
4335 ost->unavailable = 1;
4340 return ret == AVERROR_EOF ? 0 : ret;
4342 return reap_filters(0);
4346 * The following code is the main loop of the file converter
4348 static int transcode(void)
4351 AVFormatContext *os;
4354 int64_t timer_start;
4355 int64_t total_packets_written = 0;
4357 ret = transcode_init();
4361 if (stdin_interaction) {
4362 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4365 timer_start = av_gettime_relative();
4368 if ((ret = init_input_threads()) < 0)
4372 while (!received_sigterm) {
4373 int64_t cur_time= av_gettime_relative();
4375 /* if 'q' pressed, exits */
4376 if (stdin_interaction)
4377 if (check_keyboard_interaction(cur_time) < 0)
4380 /* check if there's any stream where output is still needed */
4381 if (!need_output()) {
4382 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4386 ret = transcode_step();
4387 if (ret < 0 && ret != AVERROR_EOF) {
4389 av_strerror(ret, errbuf, sizeof(errbuf));
4391 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4395 /* dump report by using the output first video and audio streams */
4396 print_report(0, timer_start, cur_time);
4399 free_input_threads();
4402 /* at the end of stream, we must flush the decoder buffers */
4403 for (i = 0; i < nb_input_streams; i++) {
4404 ist = input_streams[i];
4405 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4406 process_input_packet(ist, NULL, 0);
4413 /* write the trailer if needed and close file */
4414 for (i = 0; i < nb_output_files; i++) {
4415 os = output_files[i]->ctx;
4416 if (!output_files[i]->header_written) {
4417 av_log(NULL, AV_LOG_ERROR,
4418 "Nothing was written into output file %d (%s), because "
4419 "at least one of its streams received no packets.\n",
4423 if ((ret = av_write_trailer(os)) < 0) {
4424 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4430 /* dump report by using the first video and audio streams */
4431 print_report(1, timer_start, av_gettime_relative());
4433 /* close each encoder */
4434 for (i = 0; i < nb_output_streams; i++) {
4435 ost = output_streams[i];
4436 if (ost->encoding_needed) {
4437 av_freep(&ost->enc_ctx->stats_in);
4439 total_packets_written += ost->packets_written;
4442 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4443 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4447 /* close each decoder */
4448 for (i = 0; i < nb_input_streams; i++) {
4449 ist = input_streams[i];
4450 if (ist->decoding_needed) {
4451 avcodec_close(ist->dec_ctx);
4452 if (ist->hwaccel_uninit)
4453 ist->hwaccel_uninit(ist->dec_ctx);
4457 av_buffer_unref(&hw_device_ctx);
4464 free_input_threads();
4467 if (output_streams) {
4468 for (i = 0; i < nb_output_streams; i++) {
4469 ost = output_streams[i];
4472 if (fclose(ost->logfile))
4473 av_log(NULL, AV_LOG_ERROR,
4474 "Error closing logfile, loss of information possible: %s\n",
4475 av_err2str(AVERROR(errno)));
4476 ost->logfile = NULL;
4478 av_freep(&ost->forced_kf_pts);
4479 av_freep(&ost->apad);
4480 av_freep(&ost->disposition);
4481 av_dict_free(&ost->encoder_opts);
4482 av_dict_free(&ost->sws_dict);
4483 av_dict_free(&ost->swr_opts);
4484 av_dict_free(&ost->resample_opts);
/* Return the user CPU time consumed by this process, in microseconds. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts in 100 ns units */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/* Return the peak memory usage of this process in bytes, or 0 if unknown. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
/* Log callback that drops all messages (installed in -d / daemon mode). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4532 int main(int argc, char **argv)
4539 register_exit(ffmpeg_cleanup);
4541 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4543 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4544 parse_loglevel(argc, argv, options);
4546 if(argc>1 && !strcmp(argv[1], "-d")){
4548 av_log_set_callback(log_callback_null);
4553 avcodec_register_all();
4555 avdevice_register_all();
4557 avfilter_register_all();
4559 avformat_network_init();
4561 show_banner(argc, argv, options);
4563 /* parse options and open all input/output files */
4564 ret = ffmpeg_parse_options(argc, argv);
4568 if (nb_output_files <= 0 && nb_input_files == 0) {
4570 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4574 /* file converter / grab */
4575 if (nb_output_files <= 0) {
4576 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4580 // if (nb_input_files == 0) {
4581 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4585 for (i = 0; i < nb_output_files; i++) {
4586 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4590 current_time = ti = getutime();
4591 if (transcode() < 0)
4593 ti = getutime() - ti;
4595 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4597 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4598 decode_error_stat[0], decode_error_stat[1]);
4599 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4602 exit_program(received_nb_signals ? 255 : main_return_code);
4603 return main_return_code;