2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
/* Program identity reported via the cmdutils helpers (banner, -version). */
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
/* Destination for per-frame video statistics (-vstats/-vstats_file). */
115 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions
 * (initializer elided from this listing). */
117 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
131 static int run_as_daemon = 0;
/* Running totals of duplicated/dropped frames for the status line. */
132 static int nb_frames_dup = 0;
/* Threshold above which excessive duplication is warned about. */
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
/* Reference time for update_benchmark().
 * NOTE(review): this is subtracted from an int64_t and printed with
 * PRIu64 in update_benchmark(); a plain `int` looks too narrow --
 * confirm the declared type against the full source. */
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitles; lazily allocated in
 * do_subtitle_out() and released in ffmpeg_cleanup(). */
142 static uint8_t *subtitle_out;
/* Global stream/file tables; intentionally non-static (external
 * linkage), so other parts of the ffmpeg tool can reference them. */
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
/* Saved tty state, restored by term_exit_sigsafe(). */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* Give ist->sub2video.frame a fresh, zeroed RGB32 buffer sized from the
 * decoder dimensions, falling back to the configured sub2video w/h when
 * the decoder reports none.
 * NOTE(review): this listing is elided -- the declaration of `ret`, the
 * error return and the closing brace are not visible here. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before (re)allocating. */
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment requested for the new frame buffer. */
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* All-zero pixels: a blank canvas for the subtitle rectangles. */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted subtitle rectangle onto the RGB32 canvas `dst`
 * (w x h pixels, stride dst_linesize), translating each source index
 * through the rect's palette (r->data[1]).
 * NOTE(review): elided listing -- the AVSubtitleRect parameter, the
 * early returns after the warnings, and some locals are not visible. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
/* Only bitmap subtitles can be rendered onto a pixel canvas. */
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles that do not fit entirely inside the canvas. */
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* Advance to the rect's top-left corner; 4 bytes per RGB32 pixel. */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
/* Palette lookup for every pixel of this row. */
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* Stamp the current sub2video canvas with `pts` and send it to every
 * filtergraph input fed by this stream.  KEEP_REF retains our own
 * reference so the same frame can be re-sent by the heartbeat. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
/* A canvas must already have been allocated at this point. */
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* Render subtitle `sub` onto a fresh blank canvas and push the result
 * into the filtergraph.  Callers also invoke this with sub == NULL
 * (see sub2video_heartbeat()/sub2video_flush()); the branch handling
 * that case is elided from this listing. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* Convert the display window (ms offsets on sub->pts, which is in
 * AV_TIME_BASE units) into the input stream's time base. */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
/* sub == NULL path (if/else elided): reuse the previous end as pts. */
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
/* Paint every rectangle, then hand the frame to the filters. */
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* Called when a frame is read from the file containing `ist`: re-send
 * the current sub2video frame on sibling subtitle streams so video
 * frames do not pile up in the graph waiting for a subtitle. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Only streams that have a sub2video canvas participate. */
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* Past the display window (or no canvas yet): refresh with a blank. */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
/* Only push when some filter actually failed a frame request. */
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for sub2video: emit a final update if the last
 * subtitle window is still open, then send NULL (EOF) to each buffersrc
 * feeding this stream. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put the tty back into the
 * state saved in `oldtty`.
 * NOTE(review): the guard (presumably on `restore_tty`) before
 * tcsetattr() is elided from this listing; the av_log() line below
 * appears to belong to a separate, mostly-elided function
 * (term_exit()) -- confirm against the full source. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal bookkeeping shared between the handlers and the main loop.
 * NOTE(review): `volatile int` is the historical idiom here; strictly,
 * sig_atomic_t/atomics are the portable choice for signal handlers. */
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
/* Flipped once setup is complete; read by decode_interrupt_cb(). */
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many times; after
 * more than three signals, emit a message and hard-exit (exit call
 * elided from this listing).  The return-type line is also elided. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
/* write(2) rather than stdio: only async-signal-safe calls here. */
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * same path as POSIX signals via sigterm_handler().
 * NOTE(review): the switch statement, returns, CTRL_C_EVENT case and
 * the Sleep inside the wait loop are elided from this listing. */
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Break behaves like an interactive interrupt. */
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
/* Close/logoff/shutdown map to a termination request. */
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* Interior of term_init() (the function header and surrounding #ifdefs
 * are elided from this listing): put the controlling tty into raw-ish
 * mode so single keypresses can be read, and install signal handlers. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so keys arrive unmodified. */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
/* Poll stdin (select() on POSIX, PeekNamedPipe/console APIs on
 * Windows) and return one pending key, if any.
 * NOTE(review): the fd_set/timeval setup, the read() call and all
 * return paths are elided from this listing. */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* Distinguish a redirected pipe from a real console. */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once enough signals have
 * arrived.  Before init (transcode_init_done == 0) a single signal
 * aborts immediately; after init one signal is tolerated so the normal
 * shutdown path can run. */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
/* Shared interrupt callback handed to libavformat contexts. */
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown, registered as the exit handler: drain and free all
 * filtergraphs, output files/streams, input files/streams and misc
 * buffers, then report the final status.  Many declarations, braces
 * and conditional guards are elided from this listing. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free queued frames/subtitles, then the graphs --- */
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
/* Drain and free every frame still queued on this input. */
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
/* Same for any pending sub2video subtitles. */
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* --- output files: close the muxer context and its options --- */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
/* Close the underlying AVIO only for formats that own a file. */
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
/* --- output streams: encoders, BSFs, queues, per-stream buffers --- */
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
539 av_freep(&ost->bsf_extradata_updated);
541 av_frame_free(&ost->filtered_frame);
542 av_frame_free(&ost->last_frame);
543 av_dict_free(&ost->encoder_opts);
545 av_parser_close(ost->parser);
546 avcodec_free_context(&ost->parser_avctx);
548 av_freep(&ost->forced_keyframes);
549 av_expr_free(ost->forced_keyframes_pexpr);
550 av_freep(&ost->avfilter);
551 av_freep(&ost->logfile_prefix);
553 av_freep(&ost->audio_channels_map);
554 ost->audio_channels_mapped = 0;
556 av_dict_free(&ost->sws_dict);
558 avcodec_free_context(&ost->enc_ctx);
559 avcodec_parameters_free(&ost->ref_par);
/* Unreference any packets still waiting in the pre-header queue. */
561 if (ost->muxing_queue) {
562 while (av_fifo_size(ost->muxing_queue)) {
564 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
565 av_packet_unref(&pkt);
567 av_fifo_freep(&ost->muxing_queue);
570 av_freep(&output_streams[i]);
573 free_input_threads();
/* --- input files and streams --- */
575 for (i = 0; i < nb_input_files; i++) {
576 avformat_close_input(&input_files[i]->ctx);
577 av_freep(&input_files[i]);
579 for (i = 0; i < nb_input_streams; i++) {
580 InputStream *ist = input_streams[i];
582 av_frame_free(&ist->decoded_frame);
583 av_frame_free(&ist->filter_frame);
584 av_dict_free(&ist->decoder_opts);
585 avsubtitle_free(&ist->prev_sub.subtitle);
586 av_frame_free(&ist->sub2video.frame);
587 av_freep(&ist->filters);
588 av_freep(&ist->hwaccel_device);
589 av_freep(&ist->dts_buffer);
591 avcodec_free_context(&ist->dec_ctx);
593 av_freep(&input_streams[i]);
/* fclose() is checked: a failed close can lose buffered vstats data. */
597 if (fclose(vstats_file))
598 av_log(NULL, AV_LOG_ERROR,
599 "Error closing vstats file, loss of information possible: %s\n",
600 av_err2str(AVERROR(errno)));
602 av_freep(&vstats_filename);
604 av_freep(&input_streams);
605 av_freep(&input_files);
606 av_freep(&output_streams);
607 av_freep(&output_files);
611 avformat_network_deinit();
/* Final status message, depending on how we got here. */
613 if (received_sigterm) {
614 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
615 (int) received_sigterm);
616 } else if (ret && atomic_load(&transcode_init_done)) {
617 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
623 void remove_avoptions(AVDictionary **a, AVDictionary *b)
625 AVDictionaryEntry *t = NULL;
627 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail hard when any user-supplied option in m was never consumed,
 * i.e. it matched nothing.  NOTE(review): the exit call and closing
 * braces are elided from this listing. */
632 void assert_avoptions(AVDictionary *m)
634 AVDictionaryEntry *t;
/* Any remaining entry means an option was not recognized. */
635 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Report that the selected encoder/decoder is experimental and abort;
 * the body is entirely elided from this listing -- presumably it logs
 * guidance and calls exit_program(); confirm against the full source. */
641 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the time elapsed since the previous call,
 * tagged with the printf-style label in fmt.  NOTE(review): the
 * va_list setup, the fmt == NULL handling and the update of
 * current_time are elided from this listing. */
646 static void update_benchmark(const char *fmt, ...)
648 if (do_benchmark_all) {
649 int64_t t = getutime();
655 vsnprintf(buf, sizeof(buf), fmt, va);
657 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
666 for (i = 0; i < nb_output_streams; i++) {
667 OutputStream *ost2 = output_streams[i];
668 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final stage before the muxer: buffer the packet if the header is not
 * written yet, otherwise sanitize its timestamps and hand it to
 * av_interleaved_write_frame().  Many braces, returns and guards are
 * elided from this listing. */
672 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
674 AVFormatContext *s = of->ctx;
675 AVStream *st = ost->st;
678 if (!of->header_written) {
679 AVPacket tmp_pkt = {0};
680 /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically, capped at max_muxing_queue_size. */
681 if (!av_fifo_space(ost->muxing_queue)) {
682 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
683 ost->max_muxing_queue_size);
684 if (new_size <= av_fifo_size(ost->muxing_queue)) {
685 av_log(NULL, AV_LOG_ERROR,
686 "Too many packets buffered for output stream %d:%d.\n",
687 ost->file_index, ost->st->index);
690 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* The queue takes its own reference; the caller's packet is reset. */
694 ret = av_packet_ref(&tmp_pkt, pkt);
697 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
698 av_packet_unref(pkt);
/* -vsync drop / negative async: discard timestamps entirely. */
702 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
703 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
704 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
707 * Audio encoders may split the packets -- #frames in != #packets out.
708 * But there is no reordering, so we can limit the number of output packets
709 * by simply dropping them here.
710 * Counting encoded video frames needs to be done separately because of
711 * reordering, see do_video_out()
713 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
714 if (ost->frame_number >= ost->max_frames) {
715 av_packet_unref(pkt);
/* Pull encoder quality/error stats out of the packet side data. */
720 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
722 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
724 ost->quality = sd ? AV_RL32(sd) : -1;
725 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
727 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
729 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: the frame rate dictates the duration. */
734 if (ost->frame_rate.num && ost->is_cfr) {
735 if (pkt->duration > 0)
736 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
737 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
742 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
744 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts > pts is invalid; reconstruct dts as the median of the three
 * candidates (pts, dts, last_mux_dts + 1). */
745 if (pkt->dts != AV_NOPTS_VALUE &&
746 pkt->pts != AV_NOPTS_VALUE &&
747 pkt->dts > pkt->pts) {
748 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
750 ost->file_index, ost->st->index);
752 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
753 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
754 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly or loosely, per TS_NONSTRICT) monotonic dts. */
756 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
757 pkt->dts != AV_NOPTS_VALUE &&
758 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
759 ost->last_mux_dts != AV_NOPTS_VALUE) {
760 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
761 if (pkt->dts < max) {
762 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
763 av_log(s, loglevel, "Non-monotonous DTS in output stream "
764 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
765 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
767 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
770 av_log(s, loglevel, "changing to %"PRId64". This may result "
771 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after bumping dts up to the allowed minimum. */
773 if (pkt->pts >= pkt->dts)
774 pkt->pts = FFMAX(pkt->pts, max);
779 ost->last_mux_dts = pkt->dts;
781 ost->data_size += pkt->size;
782 ost->packets_written++;
784 pkt->stream_index = ost->index;
/* debug_ts trace of what goes into the muxer (guard elided). */
787 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
788 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
789 av_get_media_type_string(ost->enc_ctx->codec_type),
790 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
791 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
796 ret = av_interleaved_write_frame(s, pkt);
/* A write failure finishes this stream (and its encoder) for good. */
798 print_error("av_interleaved_write_frame()", ret);
799 main_return_code = 1;
800 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
802 av_packet_unref(pkt);
/* Mark this stream's encoder as finished and, where applicable, clamp
 * the owning file's recording time to the stream's current end time
 * (the guard around the clamp is elided from this listing). */
805 static void close_output_stream(OutputStream *ost)
807 OutputFile *of = output_files[ost->file_index];
809 ost->finished |= ENCODER_FINISHED;
/* Convert the stream position to AV_TIME_BASE for comparison. */
811 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
812 of->recording_time = FFMIN(of->recording_time, end);
/* Run the packet through this stream's bitstream-filter chain (if any)
 * and pass each resulting packet to write_packet().  Loop structure,
 * `idx` bookkeeping and several error branches are elided. */
816 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
820 /* apply the output bitstream filters, if any */
821 if (ost->nb_bitstream_filters) {
824 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
830 /* get a packet from the previous filter up the chain */
831 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
/* EAGAIN: this filter needs more input before producing output. */
832 if (ret == AVERROR(EAGAIN)) {
838 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
839 * the api states this shouldn't happen after init(). Propagate it here to the
840 * muxer and to the next filters in the chain to workaround this.
841 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
842 * par_out->extradata and adapt muxers accordingly to get rid of this. */
843 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
844 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
847 ost->bsf_extradata_updated[idx - 1] |= 1;
850 /* send it to the next filter down the chain or to the muxer */
851 if (idx < ost->nb_bitstream_filters) {
852 /* HACK/FIXME! - See above */
853 if (!(ost->bsf_extradata_updated[idx] & 2)) {
854 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
857 ost->bsf_extradata_updated[idx] |= 2;
859 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
864 write_packet(of, pkt, ost);
/* No bitstream filters configured: write the packet directly. */
867 write_packet(of, pkt, ost);
870 if (ret < 0 && ret != AVERROR_EOF) {
871 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
872 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Return whether this stream may still encode (the return statements
 * are elided from this listing): once the stream position reaches the
 * file's -t recording time, the stream is closed. */
878 static int check_recording_time(OutputStream *ost)
880 OutputFile *of = output_files[ost->file_index];
/* Compare stream position (encoder time base) against recording_time
 * (AV_TIME_BASE) without converting either to the other's base. */
882 if (of->recording_time != INT64_MAX &&
883 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
884 AV_TIME_BASE_Q) >= 0) {
885 close_output_stream(ost);
/* Encode one audio frame with the send_frame/receive_packet API and
 * pass every produced packet to output_packet().  The frame parameter,
 * loop braces and error/EOF paths are elided from this listing. */
891 static void do_audio_out(OutputFile *of, OutputStream *ost,
894 AVCodecContext *enc = ost->enc_ctx;
898 av_init_packet(&pkt);
/* Stop encoding once the -t recording time is reached. */
902 if (!check_recording_time(ost))
/* Without usable pts (or with negative async), synthesize pts from the
 * running sample counter. */
905 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
906 frame->pts = ost->sync_opts;
907 ost->sync_opts = frame->pts + frame->nb_samples;
908 ost->samples_encoded += frame->nb_samples;
909 ost->frames_encoded++;
911 av_assert0(pkt.size || !pkt.data);
912 update_benchmark(NULL);
/* debug_ts trace of what goes into the encoder (guard elided). */
914 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
915 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
916 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
917 enc->time_base.num, enc->time_base.den);
920 ret = avcodec_send_frame(enc, frame);
/* Drain all packets the encoder can produce for this frame. */
925 ret = avcodec_receive_packet(enc, &pkt);
926 if (ret == AVERROR(EAGAIN))
931 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> muxing time base before handing off. */
933 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
936 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
937 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
938 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
939 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
942 output_packet(of, &pkt, ost);
947 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one subtitle into packets via avcodec_encode_subtitle() and
 * send them to the muxer.  The remaining parameters, several locals and
 * most braces are elided from this listing. */
951 static void do_subtitle_out(OutputFile *of,
955 int subtitle_out_max_size = 1024 * 1024;
956 int subtitle_out_size, nb, i;
/* Subtitles without pts cannot be placed on the timeline. */
961 if (sub->pts == AV_NOPTS_VALUE) {
962 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
971 subtitle_out = av_malloc(subtitle_out_max_size);
973 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
978 /* Note: DVB subtitle need one packet to draw them and one other
979 packet to clear them */
980 /* XXX: signal it in the codec context ? */
/* nb = 2 for DVB (draw + clear), otherwise 1 (assignment elided). */
981 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
986 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
988 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
989 pts -= output_files[ost->file_index]->start_time;
990 for (i = 0; i < nb; i++) {
/* The encode call may clobber num_rects; restore it afterwards. */
991 unsigned save_num_rects = sub->num_rects;
993 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
994 if (!check_recording_time(ost))
998 // start_display_time is required to be 0
/* Fold start_display_time into sub->pts so it can be zeroed. */
999 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1000 sub->end_display_time -= sub->start_display_time;
1001 sub->start_display_time = 0;
1005 ost->frames_encoded++;
1007 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1008 subtitle_out_max_size, sub);
1010 sub->num_rects = save_num_rects;
1011 if (subtitle_out_size < 0) {
1012 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1016 av_init_packet(&pkt);
1017 pkt.data = subtitle_out;
1018 pkt.size = subtitle_out_size;
1019 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1020 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1021 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1022 /* XXX: the pts correction is handled here. Maybe handling
1023 it in the codec would be better */
/* First DVB packet draws at start, second clears at end (i == 0 / 1;
 * the branch structure is elided). */
1025 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1027 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030 output_packet(of, &pkt, ost);
/* Encode one video frame, applying the -vsync policy: depending on the
 * drift between input and output timelines, the frame may be dropped,
 * encoded once, or duplicated.  Large parts of this function (some
 * parameters, case labels, braces, gotos and error paths) are elided
 * from this listing. */
1034 static void do_video_out(OutputFile *of,
1036 AVFrame *next_picture,
1039 int ret, format_video_sync;
1041 AVCodecContext *enc = ost->enc_ctx;
1042 AVCodecParameters *mux_par = ost->st->codecpar;
1043 AVRational frame_rate;
1044 int nb_frames, nb0_frames, i;
1045 double delta, delta0;
1046 double duration = 0;
1048 InputStream *ist = NULL;
1049 AVFilterContext *filter = ost->filter->filter;
1051 if (ost->source_index >= 0)
1052 ist = input_streams[ost->source_index];
/* Estimate the frame duration in encoder time-base units, preferring
 * the filtergraph's frame rate, clamped by the forced output rate. */
1054 frame_rate = av_buffersink_get_frame_rate(filter);
1055 if (frame_rate.num > 0 && frame_rate.den > 0)
1056 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1058 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1059 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Otherwise fall back to the frame's own packet duration. */
1061 if (!ost->filters_script &&
1065 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1066 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush call (no frame): repeat the median of recent dup counts. */
1069 if (!next_picture) {
1071 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1072 ost->last_nb0_frames[1],
1073 ost->last_nb0_frames[2]);
1075 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1076 delta = delta0 + duration;
1078 /* by default, we output a single frame */
1079 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve -vsync auto into a concrete method for this muxer. */
1082 format_video_sync = video_sync_method;
1083 if (format_video_sync == VSYNC_AUTO) {
1084 if(!strcmp(of->ctx->oformat->name, "avi")) {
1085 format_video_sync = VSYNC_VFR;
1087 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1089 && format_video_sync == VSYNC_CFR
1090 && input_files[ist->file_index]->ctx->nb_streams == 1
1091 && input_files[ist->file_index]->input_ts_offset == 0) {
1092 format_video_sync = VSYNC_VSCFR;
1094 if (format_video_sync == VSYNC_CFR && copy_ts) {
1095 format_video_sync = VSYNC_VSCFR;
1098 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Clip frames that arrive before the current output position. */
1102 format_video_sync != VSYNC_PASSTHROUGH &&
1103 format_video_sync != VSYNC_DROP) {
1104 if (delta0 < -0.6) {
1105 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1107 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1108 sync_ipts = ost->sync_opts;
/* Per-method dup/drop decision (case labels partly elided). */
1113 switch (format_video_sync) {
1115 if (ost->frame_number == 0 && delta0 >= 0.5) {
1116 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1119 ost->sync_opts = lrint(sync_ipts);
1122 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1123 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1125 } else if (delta < -1.1)
1127 else if (delta > 1.1) {
1128 nb_frames = lrintf(delta);
1130 nb0_frames = lrintf(delta0 - 0.6);
1136 else if (delta > 0.6)
1137 ost->sync_opts = lrint(sync_ipts);
1140 case VSYNC_PASSTHROUGH:
1141 ost->sync_opts = lrint(sync_ipts);
1148 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1149 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* Shift the dup-count history window by one. */
1151 memmove(ost->last_nb0_frames + 1,
1152 ost->last_nb0_frames,
1153 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1154 ost->last_nb0_frames[0] = nb0_frames;
1156 if (nb0_frames == 0 && ost->last_dropped) {
1158 av_log(NULL, AV_LOG_VERBOSE,
1159 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1160 ost->frame_number, ost->st->index, ost->last_frame->pts);
1162 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1163 if (nb_frames > dts_error_threshold * 30) {
1164 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1168 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1169 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1170 if (nb_frames_dup > dup_warning) {
/* NOTE(review): dup_warning is `unsigned` but printed with %d --
 * harmless in practice, but the specifier should match; confirm. */
1171 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1175 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1177 /* duplicates frame if needed */
1178 for (i = 0; i < nb_frames; i++) {
1179 AVFrame *in_picture;
1180 av_init_packet(&pkt);
/* Re-encode the previous frame for the first nb0_frames iterations. */
1184 if (i < nb0_frames && ost->last_frame) {
1185 in_picture = ost->last_frame;
1187 in_picture = next_picture;
1192 in_picture->pts = ost->sync_opts;
1195 if (!check_recording_time(ost))
1197 if (ost->frame_number >= ost->max_frames)
/* Deprecated rawvideo shortcut: wrap the AVPicture directly. */
1201 #if FF_API_LAVF_FMT_RAWPICTURE
1202 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1203 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1204 /* raw pictures are written as AVPicture structure to
1205 avoid any copies. We support temporarily the older
1207 if (in_picture->interlaced_frame)
1208 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1210 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1211 pkt.data = (uint8_t *)in_picture;
1212 pkt.size = sizeof(AVPicture);
1213 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1214 pkt.flags |= AV_PKT_FLAG_KEY;
1216 output_packet(of, &pkt, ost);
1220 int forced_keyframe = 0;
/* Propagate field order / interlacing info to the muxer params. */
1223 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1224 ost->top_field_first >= 0)
1225 in_picture->top_field_first = !!ost->top_field_first;
1227 if (in_picture->interlaced_frame) {
1228 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1229 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 in_picture->quality = enc->global_quality;
1236 in_picture->pict_type = 0;
/* Decide whether this frame must be forced to a keyframe:
 * explicit -force_key_frames timestamps, an expression, or "source". */
1238 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1239 in_picture->pts * av_q2d(enc->time_base) : NAN;
1240 if (ost->forced_kf_index < ost->forced_kf_count &&
1241 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1242 ost->forced_kf_index++;
1243 forced_keyframe = 1;
1244 } else if (ost->forced_keyframes_pexpr) {
1246 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1247 res = av_expr_eval(ost->forced_keyframes_pexpr,
1248 ost->forced_keyframes_expr_const_values, NULL);
1249 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1250 ost->forced_keyframes_expr_const_values[FKF_N],
1251 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1252 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1253 ost->forced_keyframes_expr_const_values[FKF_T],
1254 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1257 forced_keyframe = 1;
1258 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1259 ost->forced_keyframes_expr_const_values[FKF_N];
1260 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1261 ost->forced_keyframes_expr_const_values[FKF_T];
1262 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1265 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1266 } else if ( ost->forced_keyframes
1267 && !strncmp(ost->forced_keyframes, "source", 6)
1268 && in_picture->key_frame==1) {
1269 forced_keyframe = 1;
1272 if (forced_keyframe) {
1273 in_picture->pict_type = AV_PICTURE_TYPE_I;
1274 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1277 update_benchmark(NULL);
/* debug_ts trace of what goes into the encoder (guard elided). */
1279 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1280 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1281 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1282 enc->time_base.num, enc->time_base.den);
1285 ost->frames_encoded++;
1287 ret = avcodec_send_frame(enc, in_picture);
/* Drain all packets the encoder can produce for this frame. */
1292 ret = avcodec_receive_packet(enc, &pkt);
1293 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1294 if (ret == AVERROR(EAGAIN))
1300 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1301 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1302 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1303 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Non-delaying encoders without pts: use the sync counter. */
1306 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1307 pkt.pts = ost->sync_opts;
1309 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1312 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1313 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1314 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1315 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1318 frame_size = pkt.size;
1319 output_packet(of, &pkt, ost);
1321 /* if two pass, output log */
1322 if (ost->logfile && enc->stats_out) {
1323 fprintf(ost->logfile, "%s", enc->stats_out);
1329 * For video, number of frames in == number of packets out.
1330 * But there may be reordering, so we can't throw away frames on encoder
1331 * flush, we need to limit them here, before they go into encoder.
1333 ost->frame_number++;
1335 if (vstats_filename && frame_size)
1336 do_video_stats(ost, frame_size);
/* Keep a reference to the last frame for future duplication. */
1339 if (!ost->last_frame)
1340 ost->last_frame = av_frame_alloc();
1341 av_frame_unref(ost->last_frame);
1342 if (next_picture && ost->last_frame)
1343 av_frame_ref(ost->last_frame, next_picture);
1345 av_frame_free(&ost->last_frame);
1349 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1353 static double psnr(double d)
1355 return -10.0 * log10(d);
/*
 * Append one per-frame statistics line for a video stream to the -vstats
 * log file: frame number, quantizer, optional PSNR, frame/stream sizes
 * and bitrates.
 * NOTE(review): this chunk is a sampled excerpt — lines between the
 * numbered statements (fopen error handling, the v2 "else" branch,
 * closing braces) are missing, so control flow here is incomplete.
 */
1358 static void do_video_stats(OutputStream *ost, int frame_size)
1360 AVCodecContext *enc;
1362 double ti1, bitrate, avg_bitrate;
/* opened lazily on the first call; presumably guarded by !vstats_file — missing line, TODO confirm */
1364 /* this is executed just the first time do_video_stats is called */
1366 vstats_file = fopen(vstats_filename, "w");
1374 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1375 frame_number = ost->st->nb_frames;
/* legacy (v1) vstats format omits the output file/stream indices */
1376 if (vstats_version <= 1) {
1377 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1378 ost->quality / (float)FF_QP2LAMBDA);
1380 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1381 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only printed when the encoder collected squared-error stats */
1384 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1385 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387 fprintf(vstats_file,"f_size= %6d ", frame_size);
1388 /* compute pts value */
1389 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate from this frame's size, average from stream totals */
1393 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1394 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1395 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1396 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1397 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1401 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream as completely done (both encoder and muxer side).
 * The loop below also finishes every stream of the same output file;
 * NOTE(review): excerpt — the guard around that loop (presumably a
 * -shortest condition) and the variable declarations are missing lines.
 */
1403 static void finish_output_stream(OutputStream *ost)
1405 OutputFile *of = output_files[ost->file_index];
1408 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1411 for (i = 0; i < of->ctx->nb_streams; i++)
1412 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every configured filtergraph sink and hand the frames to the
 * audio/video encoders (do_audio_out / do_video_out).
 * NOTE(review): sampled excerpt — loop braces, continue/break statements
 * and several error paths are missing between the numbered lines.
 */
1417  * Get and encode new output from any of the filtergraphs, without causing
1420  * @return 0 for success, <0 for severe errors
1422 static int reap_filters(int flush)
1424 AVFrame *filtered_frame = NULL;
1427 /* Reap all buffers present in the buffer sinks */
1428 for (i = 0; i < nb_output_streams; i++) {
1429 OutputStream *ost = output_streams[i];
1430 OutputFile *of = output_files[ost->file_index];
1431 AVFilterContext *filter;
1432 AVCodecContext *enc = ost->enc_ctx;
/* skip streams whose filtergraph is not configured yet */
1435 if (!ost->filter || !ost->filter->graph->graph)
1437 filter = ost->filter->filter;
/* lazily initialize the output stream before reaping its first frame */
1439 if (!ost->initialized) {
1440 char error[1024] = "";
1441 ret = init_output_stream(ost, error, sizeof(error));
1443 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1444 ost->file_index, ost->index, error);
1449 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1450 return AVERROR(ENOMEM);
1452 filtered_frame = ost->filtered_frame;
1455 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1456 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1457 AV_BUFFERSINK_FLAG_NO_REQUEST);
1459 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1460 av_log(NULL, AV_LOG_WARNING,
1461 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* on flush, EOF from a video sink sends a NULL frame so the encoder can drain */
1462 } else if (flush && ret == AVERROR_EOF) {
1463 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1464 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1468 if (ost->finished) {
1469 av_frame_unref(filtered_frame);
1472 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1473 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1474 AVRational filter_tb = av_buffersink_get_time_base(filter);
1475 AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra fractional
 * precision; scaled back down below */
1476 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478 tb.den <<= extra_bits;
1480 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1481 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1482 float_pts /= 1 << extra_bits;
1483 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1484 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486 filtered_frame->pts =
1487 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1488 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1490 //if (ost->source_index >= 0)
1491 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1493 switch (av_buffersink_get_type(filter)) {
1494 case AVMEDIA_TYPE_VIDEO:
/* propagate the filter's SAR unless the user forced an aspect ratio */
1495 if (!ost->frame_aspect_ratio.num)
1496 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1499 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1502 enc->time_base.num, enc->time_base.den);
1505 do_video_out(of, ost, filtered_frame, float_pts);
1507 case AVMEDIA_TYPE_AUDIO:
1508 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509 enc->channels != filtered_frame->channels) {
1510 av_log(NULL, AV_LOG_ERROR,
1511 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1514 do_audio_out(of, ost, filtered_frame);
1517 // TODO support subtitle filters
1521 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and (at verbose level) per-input/per-output stream packet statistics.
 * NOTE(review): sampled excerpt — braces, some else branches and loop
 * closers are missing between the numbered lines.
 */
1528 static void print_final_stats(int64_t total_size)
1530 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531 uint64_t subtitle_size = 0;
1532 uint64_t data_size = 0;
/* -1 means "overhead unknown" and prints as "unknown" below */
1533 float percent = -1.0;
1537 for (i = 0; i < nb_output_streams; i++) {
1538 OutputStream *ost = output_streams[i];
1539 switch (ost->enc_ctx->codec_type) {
1540 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543 default: other_size += ost->data_size; break;
1545 extra_size += ost->enc_ctx->extradata_size;
1546 data_size += ost->data_size;
/* first-pass-only streams are excluded from something here — the
 * statement this condition guards is a missing line; TODO confirm */
1547 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1548 != AV_CODEC_FLAG_PASS1)
/* muxing overhead = container bytes beyond the raw stream payloads */
1552 if (data_size && total_size>0 && total_size >= data_size)
1553 percent = 100.0 * (total_size - data_size) / data_size;
1555 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556 video_size / 1024.0,
1557 audio_size / 1024.0,
1558 subtitle_size / 1024.0,
1559 other_size / 1024.0,
1560 extra_size / 1024.0);
1562 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1564 av_log(NULL, AV_LOG_INFO, "unknown");
1565 av_log(NULL, AV_LOG_INFO, "\n");
1567 /* print verbose per-stream stats */
1568 for (i = 0; i < nb_input_files; i++) {
1569 InputFile *f = input_files[i];
/* note: this local total_size shadows the parameter inside the loop */
1570 uint64_t total_packets = 0, total_size = 0;
1572 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573 i, f->ctx->filename);
1575 for (j = 0; j < f->nb_streams; j++) {
1576 InputStream *ist = input_streams[f->ist_index + j];
1577 enum AVMediaType type = ist->dec_ctx->codec_type;
1579 total_size += ist->data_size;
1580 total_packets += ist->nb_packets;
1582 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583 i, j, media_type_string(type));
1584 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585 ist->nb_packets, ist->data_size);
1587 if (ist->decoding_needed) {
1588 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589 ist->frames_decoded);
1590 if (type == AVMEDIA_TYPE_AUDIO)
1591 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592 av_log(NULL, AV_LOG_VERBOSE, "; ");
1595 av_log(NULL, AV_LOG_VERBOSE, "\n");
1598 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599 total_packets, total_size);
1602 for (i = 0; i < nb_output_files; i++) {
1603 OutputFile *of = output_files[i];
1604 uint64_t total_packets = 0, total_size = 0;
1606 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607 i, of->ctx->filename);
1609 for (j = 0; j < of->ctx->nb_streams; j++) {
1610 OutputStream *ost = output_streams[of->ost_index + j];
1611 enum AVMediaType type = ost->enc_ctx->codec_type;
1613 total_size += ost->data_size;
1614 total_packets += ost->packets_written;
1616 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617 i, j, media_type_string(type));
1618 if (ost->encoding_needed) {
1619 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620 ost->frames_encoded);
1621 if (type == AVMEDIA_TYPE_AUDIO)
1622 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623 av_log(NULL, AV_LOG_VERBOSE, "; ");
1626 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627 ost->packets_written, ost->data_size);
1629 av_log(NULL, AV_LOG_VERBOSE, "\n");
1632 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633 total_packets, total_size);
/* warn when no payload at all was produced */
1635 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1638 av_log(NULL, AV_LOG_WARNING, "\n");
1640 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line (frame/fps/q/size/time/bitrate/speed)
 * to stderr and, when -progress is active, a machine-readable key=value
 * block to progress_avio. Rate-limited to one update per 500ms unless
 * this is the final report.
 * NOTE(review): sampled excerpt — many lines (returns, else branches,
 * buf declaration, hour/min computation, closing braces) are missing.
 */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1648 AVBPrint buf_script;
1650 AVFormatContext *oc;
1652 AVCodecContext *enc;
1653 int frame_number, vid, i;
/* INT64_MIN + 1 so FFABS(pts) below cannot overflow */
1656 int64_t pts = INT64_MIN + 1;
1657 static int64_t last_time = -1;
1658 static int qp_histogram[52];
1659 int hours, mins, secs, us;
/* nothing to do if neither console stats nor -progress were requested */
1663 if (!print_stats && !is_last_report && !progress_avio)
/* throttle intermediate reports to at most one per 500ms */
1666 if (!is_last_report) {
1667 if (last_time == -1) {
1668 last_time = cur_time;
1671 if ((cur_time - last_time) < 500000)
1673 last_time = cur_time;
1676 t = (cur_time-timer_start) / 1000000.0;
1679 oc = output_files[0]->ctx;
1681 total_size = avio_size(oc->pb);
1682 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683 total_size = avio_tell(oc->pb);
1687 av_bprint_init(&buf_script, 0, 1);
1688 for (i = 0; i < nb_output_streams; i++) {
1690 ost = output_streams[i];
1692 if (!ost->stream_copy)
1693 q = ost->quality / (float) FF_QP2LAMBDA;
/* additional q= entries for video streams after the first one */
1695 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1697 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698 ost->file_index, ost->index, q);
/* the first video stream drives frame/fps reporting */
1700 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1703 frame_number = ost->frame_number;
1704 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal place only for low frame rates */
1705 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1706 frame_number, fps < 9.95, fps, q);
1707 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1709 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710 ost->file_index, ost->index, q);
1712 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram display (enabled via qp_hist; guard line missing) */
1716 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1718 for (j = 0; j < 32; j++)
1719 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1722 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1724 double error, error_sum = 0;
1725 double scale, scale_sum = 0;
1727 char type[3] = { 'Y','U','V' };
1728 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1729 for (j = 0; j < 3; j++) {
/* last report: cumulative error over all frames; otherwise per-frame */
1730 if (is_last_report) {
1731 error = enc->error[j];
1732 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1734 error = ost->error[j];
1735 scale = enc->width * enc->height * 255.0 * 255.0;
1741 p = psnr(error / scale);
1742 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script key */
1743 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744 ost->file_index, ost->index, type[j] | 32, p);
1746 p = psnr(error_sum / scale_sum);
1747 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1748 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749 ost->file_index, ost->index, p);
1753 /* compute min output value */
1754 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1755 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1756 ost->st->time_base, AV_TIME_BASE_Q));
1758 nb_frames_drop += ost->last_dropped;
1761 secs = FFABS(pts) / AV_TIME_BASE;
1762 us = FFABS(pts) % AV_TIME_BASE;
1768 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
/* speed = output seconds produced per wall-clock second */
1769 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 "size=%8.0fkB time=", total_size / 1024.0);
1776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1777 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1778 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1779 (100 * us) / AV_TIME_BASE);
1782 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1783 av_bprintf(&buf_script, "bitrate=N/A\n");
1785 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1786 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1789 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1792 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1793 hours, mins, secs, us);
1795 if (nb_frames_dup || nb_frames_drop)
1796 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1797 nb_frames_dup, nb_frames_drop);
1798 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1799 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1802 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1803 av_bprintf(&buf_script, "speed=N/A\n");
1805 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1806 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' at the end */
1809 if (print_stats || is_last_report) {
1810 const char end = is_last_report ? '\n' : '\r';
1811 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1812 fprintf(stderr, "%s %c", buf, end);
1814 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1819 if (progress_avio) {
1820 av_bprintf(&buf_script, "progress=%s\n",
1821 is_last_report ? "end" : "continue");
1822 avio_write(progress_avio, buf_script.str,
1823 FFMIN(buf_script.len, buf_script.size - 1));
1824 avio_flush(progress_avio);
1825 av_bprint_finalize(&buf_script, NULL);
1826 if (is_last_report) {
1827 if ((ret = avio_closep(&progress_avio)) < 0)
1828 av_log(NULL, AV_LOG_ERROR,
1829 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1834 print_final_stats(total_size);
/*
 * At end of input, drain every encoder by sending NULL frames and
 * muxing the remaining packets. Streams that never received data get
 * their filtergraph force-configured with fake (libavformat-derived)
 * parameters so the encoder can at least be initialized.
 * NOTE(review): sampled excerpt — continue/exit_program lines, the
 * "stop_encoding" flag logic and several closing braces are missing.
 */
1837 static void flush_encoders(void)
1841 for (i = 0; i < nb_output_streams; i++) {
1842 OutputStream *ost = output_streams[i];
1843 AVCodecContext *enc = ost->enc_ctx;
1844 OutputFile *of = output_files[ost->file_index];
/* stream-copy outputs have no encoder to flush */
1846 if (!ost->encoding_needed)
1849 // Try to enable encoding with no input frames.
1850 // Maybe we should just let encoding fail instead.
1851 if (!ost->initialized) {
1852 FilterGraph *fg = ost->filter->graph;
1853 char error[1024] = "";
1855 av_log(NULL, AV_LOG_WARNING,
1856 "Finishing stream %d:%d without any data written to it.\n",
1857 ost->file_index, ost->st->index);
1859 if (ost->filter && !fg->graph) {
1861 for (x = 0; x < fg->nb_inputs; x++) {
1862 InputFilter *ifilter = fg->inputs[x];
1863 if (ifilter->format < 0) {
1864 AVCodecParameters *par = ifilter->ist->st->codecpar;
1865 // We never got any input. Set a fake format, which will
1866 // come from libavformat.
1867 ifilter->format = par->format;
1868 ifilter->sample_rate = par->sample_rate;
1869 ifilter->channels = par->channels;
1870 ifilter->channel_layout = par->channel_layout;
1871 ifilter->width = par->width;
1872 ifilter->height = par->height;
1873 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1877 if (!ifilter_has_all_input_formats(fg))
1880 ret = configure_filtergraph(fg);
1882 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1886 finish_output_stream(ost);
1889 ret = init_output_stream(ost, error, sizeof(error));
1891 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1892 ost->file_index, ost->index, error);
/* raw/PCM-style audio encoders (frame_size <= 1) need no draining */
1897 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1899 #if FF_API_LAVF_FMT_RAWPICTURE
1900 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* only audio and video encoders are drained here */
1904 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1908 const char *desc = NULL;
1912 switch (enc->codec_type) {
1913 case AVMEDIA_TYPE_AUDIO:
1916 case AVMEDIA_TYPE_VIDEO:
1923 av_init_packet(&pkt);
1927 update_benchmark(NULL);
/* keep sending NULL (drain) frames until the encoder stops asking */
1929 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1930 ret = avcodec_send_frame(enc, NULL);
1932 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1939 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1940 if (ret < 0 && ret != AVERROR_EOF) {
1941 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass stats go to the pass log even during flush */
1946 if (ost->logfile && enc->stats_out) {
1947 fprintf(ost->logfile, "%s", enc->stats_out);
1949 if (ret == AVERROR_EOF) {
1952 if (ost->finished & MUXER_FINISHED) {
1953 av_packet_unref(&pkt);
1956 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1957 pkt_size = pkt.size;
1958 output_packet(of, &pkt, ost);
1959 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1960 do_video_stats(ost, pkt_size);
/*
 * Decide whether a packet demuxed from ist may be stream-copied into
 * ost right now.
 * NOTE(review): sampled excerpt — the return statements these two
 * conditions guard are missing lines.
 */
1967  * Check whether a packet from ist should be written into ost at this time
1969 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1971 OutputFile *of = output_files[ost->file_index];
1972 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* only the input stream this output was mapped from qualifies */
1974 if (ost->source_index != ist_index)
/* drop packets dated before the requested output start time */
1980 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy a demuxed packet straight to the muxer (no re-encode): rescale
 * pts/dts/duration into the output mux timebase, honor -ss / -t cut
 * points, and run the legacy parser / rawpicture compatibility paths.
 * NOTE(review): sampled excerpt — returns, else keywords and closing
 * braces are missing between the numbered lines.
 */
1986 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1988 OutputFile *of = output_files[ost->file_index];
1989 InputFile *f = input_files [ist->file_index];
1990 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1991 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1995 av_init_packet(&opkt);
/* until the first keyframe arrives, drop non-key packets unless the
 * user asked to keep them */
1997 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1998 !ost->copy_initial_nonkeyframes)
/* drop packets dated before the output start (-ss), unless -copypriorss */
2001 if (!ost->frame_number && !ost->copy_prior_start) {
2002 int64_t comp_start = start_time;
2003 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2004 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2005 if (pkt->pts == AV_NOPTS_VALUE ?
2006 ist->pts < comp_start :
2007 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* stop the stream once the output recording time (-t) is reached */
2011 if (of->recording_time != INT64_MAX &&
2012 ist->pts >= of->recording_time + start_time) {
2013 close_output_stream(ost);
2017 if (f->recording_time != INT64_MAX) {
2018 start_time = f->ctx->start_time;
2019 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2020 start_time += f->start_time;
2021 if (ist->pts >= f->recording_time + start_time) {
2022 close_output_stream(ost);
2027 /* force the input stream PTS */
2028 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2031 if (pkt->pts != AV_NOPTS_VALUE)
2032 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2034 opkt.pts = AV_NOPTS_VALUE;
/* missing dts falls back to the input stream's running dts estimate */
2036 if (pkt->dts == AV_NOPTS_VALUE)
2037 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2039 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2040 opkt.dts -= ost_tb_start_time;
/* audio: rebuild timestamps from sample counts to avoid rounding drift */
2042 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2043 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2045 duration = ist->dec_ctx->frame_size;
2046 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2047 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2048 ost->mux_timebase) - ost_tb_start_time;
2051 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2053 opkt.flags = pkt->flags;
2054 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2055 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2056 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2057 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2060 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2061 &opkt.data, &opkt.size,
2062 pkt->data, pkt->size,
2063 pkt->flags & AV_PKT_FLAG_KEY);
2065 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data so the packet owns (and frees) it */
2070 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2075 opkt.data = pkt->data;
2076 opkt.size = pkt->size;
2078 av_copy_packet_side_data(&opkt, pkt);
2080 #if FF_API_LAVF_FMT_RAWPICTURE
2081 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2082 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2083 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2084 /* store AVPicture in AVPacket, as expected by the output format */
2085 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2087 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2091 opkt.data = (uint8_t *)&pict;
2092 opkt.size = sizeof(AVPicture);
2093 opkt.flags |= AV_PKT_FLAG_KEY;
2097 output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout
 * for its channel count (bounded by -guess_layout_max) and warn that
 * the layout was guessed.
 * NOTE(review): sampled excerpt — the return statements and closing
 * braces are missing lines; return semantics cannot be confirmed here.
 */
2100 int guess_input_channel_layout(InputStream *ist)
2102 AVCodecContext *dec = ist->dec_ctx;
2104 if (!dec->channel_layout) {
2105 char layout_name[256];
/* refuse to guess for channel counts above the user-set limit */
2107 if (dec->channels > ist->guess_layout_max)
2109 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2110 if (!dec->channel_layout)
2112 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2113 dec->channels, dec->channel_layout);
2114 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2115 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Track decode success/failure statistics and, with -xerror, abort on
 * decode errors or on frames the decoder flagged as corrupt.
 * NOTE(review): sampled excerpt — the exit_program calls these
 * conditions guard are missing lines.
 */
2120 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2122 if (*got_output || ret<0)
2123 decode_error_stat[ret<0] ++;
2125 if (ret < 0 && exit_on_error)
2128 if (exit_on_error && *got_output && ist) {
2129 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2130 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Return whether every audio/video input of the filtergraph has a known
 * format (format >= 0), i.e. whether the graph can be configured.
 * NOTE(review): sampled excerpt — the return 0 / return 1 lines are
 * missing from this chunk.
 */
2136 // Filters can be configured only if the formats of all inputs are known.
2137 static int ifilter_has_all_input_formats(FilterGraph *fg)
2140 for (i = 0; i < fg->nb_inputs; i++) {
/* a negative format on an A/V input means "not known yet" */
2141 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2142 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push one decoded frame into a filtergraph input. If the frame's
 * parameters differ from what the graph was built for (or the graph is
 * not built yet), reconfigure — buffering frames in a FIFO while other
 * inputs' formats are still unknown.
 * NOTE(review): sampled excerpt — break statements, null checks and
 * several returns are missing between the numbered lines.
 */
2148 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2150 FilterGraph *fg = ifilter->graph;
2151 int need_reinit, ret, i;
2153 /* determine if the parameters for this input changed */
2154 need_reinit = ifilter->format != frame->format;
/* a change in hw frames context also forces reinit */
2155 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2156 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2159 switch (ifilter->ist->st->codecpar->codec_type) {
2160 case AVMEDIA_TYPE_AUDIO:
2161 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2162 ifilter->channels != frame->channels ||
2163 ifilter->channel_layout != frame->channel_layout;
2165 case AVMEDIA_TYPE_VIDEO:
2166 need_reinit |= ifilter->width != frame->width ||
2167 ifilter->height != frame->height;
2172 ret = ifilter_parameters_from_frame(ifilter, frame);
2177 /* (re)init the graph if possible, otherwise buffer the frame and return */
2178 if (need_reinit || !fg->graph) {
2179 for (i = 0; i < fg->nb_inputs; i++) {
/* cannot configure yet: stash a clone of the frame in the input FIFO */
2180 if (!ifilter_has_all_input_formats(fg)) {
2181 AVFrame *tmp = av_frame_clone(frame);
2183 return AVERROR(ENOMEM);
2184 av_frame_unref(frame);
2186 if (!av_fifo_space(ifilter->frame_queue)) {
2187 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2189 av_frame_free(&tmp);
2193 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the old graph before tearing it down for reconfiguration */
2198 ret = reap_filters(1);
2199 if (ret < 0 && ret != AVERROR_EOF) {
2201 av_strerror(ret, errbuf, sizeof(errbuf));
2203 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2207 ret = configure_filtergraph(fg);
2209 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2214 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2216 if (ret != AVERROR_EOF)
2217 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal EOF on one filtergraph input. If the graph was never
 * configured and all of its inputs are now at EOF, mark every output
 * stream of the graph as finished.
 * NOTE(review): sampled excerpt — the eof flag assignment, else branch
 * and return statements are missing lines.
 */
2224 static int ifilter_send_eof(InputFilter *ifilter)
/* a NULL frame on the buffersrc is the EOF marker */
2230 if (ifilter->filter) {
2231 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2235 // the filtergraph was never configured
2236 FilterGraph *fg = ifilter->graph;
2237 for (i = 0; i < fg->nb_inputs; i++)
2238 if (!fg->inputs[i]->eof)
/* loop ran to completion => every input has hit EOF */
2240 if (i == fg->nb_inputs) {
2241 // All the input streams have finished without the filtergraph
2242 // ever being configured.
2243 // Mark the output streams as finished.
2244 for (j = 0; j < fg->nb_outputs; j++)
2245 finish_output_stream(fg->outputs[j]->ost);
/*
 * Thin wrapper over the send/receive decode API that emulates the old
 * got_frame-style calling convention used throughout this file.
 * NOTE(review): sampled excerpt — the *got_frame initialization/setting
 * and the return statements are missing lines.
 */
2252 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2253 // There is the following difference: if you got a frame, you must call
2254 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2255 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2256 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2263 ret = avcodec_send_packet(avctx, pkt);
2264 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2265 // decoded frames with avcodec_receive_frame() until done.
2266 if (ret < 0 && ret != AVERROR_EOF)
2270 ret = avcodec_receive_frame(avctx, frame);
/* EAGAIN here just means "no frame yet", not an error */
2271 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Feed one decoded frame to every filtergraph input fed by this input
 * stream; all but the last consumer get a reference, the last consumes
 * the frame itself.
 * NOTE(review): sampled excerpt — the else branch assigning f for the
 * final iteration, break, and the return are missing lines.
 */
2279 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2284 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2285 for (i = 0; i < ist->nb_filters; i++) {
/* intermediate consumers get a new reference; the last one takes the frame */
2286 if (i < ist->nb_filters - 1) {
2287 f = ist->filter_frame;
2288 ret = av_frame_ref(f, decoded_frame);
2293 ret = ifilter_send_frame(ist->filters[i], f);
/* a filter input already at EOF is not an error for the caller */
2294 if (ret == AVERROR_EOF)
2295 ret = 0; /* ignore */
2297 av_log(NULL, AV_LOG_ERROR,
2298 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, update per-stream statistics and running
 * next_pts/next_dts estimates, rescale the frame's pts to the sample
 * clock, and send the frame into the filtergraphs.
 * NOTE(review): sampled excerpt — the signature's trailing parameter
 * line, gotos/labels and some else keywords are missing lines.
 */
2305 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2308 AVFrame *decoded_frame;
2309 AVCodecContext *avctx = ist->dec_ctx;
2311 AVRational decoded_frame_tb;
/* persistent frames are allocated lazily on first use */
2313 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2314 return AVERROR(ENOMEM);
2315 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 decoded_frame = ist->decoded_frame;
2319 update_benchmark(NULL);
2320 ret = decode(avctx, decoded_frame, got_output, pkt);
2321 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a non-positive sample rate from the decoder is unusable */
2325 if (ret >= 0 && avctx->sample_rate <= 0) {
2326 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2327 ret = AVERROR_INVALIDDATA;
2330 if (ret != AVERROR_EOF)
2331 check_decode_result(ist, got_output, ret);
2333 if (!*got_output || ret < 0)
2336 ist->samples_decoded += decoded_frame->nb_samples;
2337 ist->frames_decoded++;
2340 /* increment next_dts to use for the case where the input stream does not
2341 have timestamps or there are multiple frames in the packet */
2342 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2344 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pick a timestamp source: frame pts, then packet pts, then stream dts */
2348 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2349 decoded_frame_tb = ist->st->time_base;
2350 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2351 decoded_frame->pts = pkt->pts;
2352 decoded_frame_tb = ist->st->time_base;
2354 decoded_frame->pts = ist->dts;
2355 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale onto the sample-rate clock, tracking rounding remainders */
2357 if (decoded_frame->pts != AV_NOPTS_VALUE)
2358 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2359 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2360 (AVRational){1, avctx->sample_rate});
2361 ist->nb_samples = decoded_frame->nb_samples;
2362 err = send_frame_to_filters(ist, decoded_frame);
2364 av_frame_unref(ist->filter_frame);
2365 av_frame_unref(decoded_frame);
2366 return err < 0 ? err : ret;
/*
 * Decode one video packet with the stream's decoder (or drain the decoder
 * when pkt is NULL / eof is set), fix up timestamps, and hand any produced
 * frame to the stream's filtergraph inputs via send_frame_to_filters().
 *
 * Returns 0 (or the decoder's return value) on success, a negative AVERROR
 * on failure; *got_output is set when a frame was produced.
 *
 * NOTE(review): this excerpt is missing interleaved source lines, so some
 * braces/else-branches of the original are not visible here.
 */
2369 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2372 AVFrame *decoded_frame;
2373 int i, ret = 0, err = 0;
2374 int64_t best_effort_timestamp;
2375 int64_t dts = AV_NOPTS_VALUE;
2378 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2379 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2381 if (!eof && pkt && pkt->size == 0)
2384 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2385 return AVERROR(ENOMEM);
2386 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2387 return AVERROR(ENOMEM);
2388 decoded_frame = ist->decoded_frame;
     /* give the decoder the stream-timebase dts that ffmpeg.c tracks */
2389 if (ist->dts != AV_NOPTS_VALUE)
2390 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2393 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2396 // The old code used to set dts on the drain packet, which does not work
2397 // with the new API anymore.
     /* remember this packet's dts so frames emitted while draining can
      * still be stamped (see the dts_buffer fallback below) */
2399 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2401 return AVERROR(ENOMEM);
2402 ist->dts_buffer = new;
2403 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2406 update_benchmark(NULL);
2407 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2408 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2412 // The following line may be required in some cases where there is no parser
2413 // or the parser does not has_b_frames correctly
2414 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2415 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2416 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2418 av_log(ist->dec_ctx, AV_LOG_WARNING,
2419 "video_delay is larger in decoder than demuxer %d > %d.\n"
2420 "If you want to help, upload a sample "
2421 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2422 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2423 ist->dec_ctx->has_b_frames,
2424 ist->st->codecpar->video_delay);
2427 if (ret != AVERROR_EOF)
2428 check_decode_result(ist, got_output, ret);
     /* purely informational: report when the decoder output does not match
      * the context's negotiated parameters */
2430 if (*got_output && ret >= 0) {
2431 if (ist->dec_ctx->width != decoded_frame->width ||
2432 ist->dec_ctx->height != decoded_frame->height ||
2433 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2434 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2435 decoded_frame->width,
2436 decoded_frame->height,
2437 decoded_frame->format,
2438 ist->dec_ctx->width,
2439 ist->dec_ctx->height,
2440 ist->dec_ctx->pix_fmt);
2444 if (!*got_output || ret < 0)
     /* honor the user's -top override of field order */
2447 if(ist->top_field_first>=0)
2448 decoded_frame->top_field_first = ist->top_field_first;
2450 ist->frames_decoded++;
     /* download the frame from hw memory when a retrieval hook is installed */
2452 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2453 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2457 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2459 best_effort_timestamp= decoded_frame->best_effort_timestamp;
     /* in forced-CFR mode timestamps are just a frame counter */
2461 if (ist->framerate.num)
2462 best_effort_timestamp = ist->cfr_next_pts++;
     /* while draining, fall back to the oldest buffered packet dts when the
      * decoder provided no best-effort timestamp */
2464 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2465 best_effort_timestamp = ist->dts_buffer[0];
2467 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2468 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2469 ist->nb_dts_buffer--;
2472 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2473 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2475 if (ts != AV_NOPTS_VALUE)
2476 ist->next_pts = ist->pts = ts;
2480 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2481 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2482 ist->st->index, av_ts2str(decoded_frame->pts),
2483 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2484 best_effort_timestamp,
2485 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2486 decoded_frame->key_frame, decoded_frame->pict_type,
2487 ist->st->time_base.num, ist->st->time_base.den);
     /* container-level sample aspect ratio overrides the frame's own value */
2490 if (ist->st->sample_aspect_ratio.num)
2491 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2493 err = send_frame_to_filters(ist, decoded_frame);
2496 av_frame_unref(ist->filter_frame);
2497 av_frame_unref(decoded_frame);
2498 return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix up its display duration
 * (-fix_sub_duration), feed it to sub2video if configured, and send it to
 * every output stream that encodes subtitles from this input.
 *
 * NOTE(review): this excerpt is missing interleaved source lines, so some
 * braces/else-branches of the original are not visible here.
 */
2501 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2504 AVSubtitle subtitle;
2506 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2507 &subtitle, got_output, pkt);
2509 check_decode_result(NULL, got_output, ret);
2511 if (ret < 0 || !*got_output) {
2514 sub2video_flush(ist);
     /* -fix_sub_duration: delay output by one subtitle so the previous
      * one's end time can be clamped to this one's start time */
2518 if (ist->fix_sub_duration) {
2520 if (ist->prev_sub.got_output) {
2521 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2522 1000, AV_TIME_BASE);
2523 if (end < ist->prev_sub.subtitle.end_display_time) {
2524 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2525 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2526 ist->prev_sub.subtitle.end_display_time, end,
2527 end <= 0 ? ", dropping it" : "");
2528 ist->prev_sub.subtitle.end_display_time = end;
     /* swap current decode result with the buffered previous subtitle:
      * we output the previous one now and keep the new one for next time */
2531 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2532 FFSWAP(int, ret, ist->prev_sub.ret);
2533 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2541 if (ist->sub2video.frame) {
2542 sub2video_update(ist, &subtitle);
2543 } else if (ist->nb_filters) {
     /* sub2video not set up yet: queue subtitles in a growing FIFO until
      * the filtergraph is configured */
2544 if (!ist->sub2video.sub_queue)
2545 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2546 if (!ist->sub2video.sub_queue)
2548 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2549 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2553 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2557 if (!subtitle.num_rects)
2560 ist->frames_decoded++;
     /* encode this subtitle into every eligible subtitle output stream */
2562 for (i = 0; i < nb_output_streams; i++) {
2563 OutputStream *ost = output_streams[i];
2565 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2566 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2569 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2574 avsubtitle_free(&subtitle);
2578 static int send_filter_eof(InputStream *ist)
2581 for (i = 0; i < ist->nb_filters; i++) {
2582 ret = ifilter_send_eof(ist->filters[i]);
/*
 * Central per-packet dispatcher: track input timestamps, run the packet
 * through the appropriate decoder (audio/video/subtitle) when decoding is
 * needed, or advance timestamps and stream-copy the packet otherwise.
 * Returns non-zero while the stream has more to process, 0 once EOF was
 * fully reached.
 *
 * NOTE(review): this excerpt is missing interleaved source lines
 * (declarations of avpkt/ret/got_output/repeating, several braces and
 * else-branches are not visible here).
 */
2589 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2590 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2594 int eof_reached = 0;
     /* first packet of the stream: seed ist->dts/pts */
2597 if (!ist->saw_first_ts) {
2598 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2600 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2601 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2602 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2604 ist->saw_first_ts = 1;
2607 if (ist->next_dts == AV_NOPTS_VALUE)
2608 ist->next_dts = ist->dts;
2609 if (ist->next_pts == AV_NOPTS_VALUE)
2610 ist->next_pts = ist->pts;
2614 av_init_packet(&avpkt);
     /* resync our AV_TIME_BASE_Q timestamp tracking to the packet's dts */
2621 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2622 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2623 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2624 ist->next_pts = ist->pts = ist->dts;
2627 // while we have more to decode or while the decoder did output something on EOF
2628 while (ist->decoding_needed) {
2629 int64_t duration = 0;
2631 int decode_failed = 0;
2633 ist->pts = ist->next_pts;
2634 ist->dts = ist->next_dts;
2636 switch (ist->dec_ctx->codec_type) {
2637 case AVMEDIA_TYPE_AUDIO:
2638 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2641 case AVMEDIA_TYPE_VIDEO:
2642 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2644 if (!repeating || !pkt || got_output) {
     /* estimate the duration of one frame: packet duration first,
      * then the decoder's framerate (accounting for repeated fields) */
2645 if (pkt && pkt->duration) {
2646 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2647 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2648 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2649 duration = ((int64_t)AV_TIME_BASE *
2650 ist->dec_ctx->framerate.den * ticks) /
2651 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2654 if(ist->dts != AV_NOPTS_VALUE && duration) {
2655 ist->next_dts += duration;
2657 ist->next_dts = AV_NOPTS_VALUE;
2661 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2663 case AVMEDIA_TYPE_SUBTITLE:
2666 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2667 if (!pkt && ret >= 0)
2674 if (ret == AVERROR_EOF) {
2680 if (decode_failed) {
2681 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2682 ist->file_index, ist->st->index, av_err2str(ret));
2684 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2685 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2687 if (!decode_failed || exit_on_error)
2693 ist->got_output = 1;
2698 // During draining, we might get multiple output frames in this loop.
2699 // ffmpeg.c does not drain the filter chain on configuration changes,
2700 // which means if we send multiple frames at once to the filters, and
2701 // one of those frames changes configuration, the buffered frames will
2702 // be lost. This can upset certain FATE tests.
2703 // Decode only 1 frame per call on EOF to appease these FATE tests.
2704 // The ideal solution would be to rewrite decoding to use the new
2705 // decoding API in a better way.
2712 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2713 /* except when looping we need to flush but not to send an EOF */
2714 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2715 int ret = send_filter_eof(ist);
2717 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2722 /* handle stream copy */
2723 if (!ist->decoding_needed) {
2724 ist->dts = ist->next_dts;
2725 switch (ist->dec_ctx->codec_type) {
2726 case AVMEDIA_TYPE_AUDIO:
2727 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2728 ist->dec_ctx->sample_rate;
2730 case AVMEDIA_TYPE_VIDEO:
2731 if (ist->framerate.num) {
2732 // TODO: Remove work-around for c99-to-c89 issue 7
2733 AVRational time_base_q = AV_TIME_BASE_Q;
2734 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2735 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2736 } else if (pkt->duration) {
2737 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2738 } else if(ist->dec_ctx->framerate.num != 0) {
2739 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2740 ist->next_dts += ((int64_t)AV_TIME_BASE *
2741 ist->dec_ctx->framerate.den * ticks) /
2742 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2746 ist->pts = ist->dts;
2747 ist->next_pts = ist->next_dts;
     /* forward the packet to every output stream that stream-copies it */
2749 for (i = 0; pkt && i < nb_output_streams; i++) {
2750 OutputStream *ost = output_streams[i];
2752 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2755 do_streamcopy(ist, ost, pkt);
2758 return !eof_reached;
/*
 * Generate an SDP description covering all RTP output files and either
 * print it to stdout or write it to -sdp_file. Bails out early (returning
 * without output) until every output file has written its header.
 *
 * NOTE(review): this excerpt is missing interleaved source lines
 * (declarations of sdp[]/i/j, NULL checks and gotos are not visible here).
 */
2761 static void print_sdp(void)
2766 AVIOContext *sdp_pb;
2767 AVFormatContext **avc;
     /* wait until all muxers have been initialized */
2769 for (i = 0; i < nb_output_files; i++) {
2770 if (!output_files[i]->header_written)
2774 avc = av_malloc_array(nb_output_files, sizeof(*avc));
     /* collect only the RTP outputs; j counts how many were found */
2777 for (i = 0, j = 0; i < nb_output_files; i++) {
2778 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2779 avc[j] = output_files[i]->ctx;
2787 av_sdp_create(avc, j, sdp, sizeof(sdp));
2789 if (!sdp_filename) {
2790 printf("SDP:\n%s\n", sdp);
2793 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2794 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2796 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2797 avio_closep(&sdp_pb);
2798 av_freep(&sdp_filename);
2806 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2809 for (i = 0; hwaccels[i].name; i++)
2810 if (hwaccels[i].pix_fmt == pix_fmt)
2811 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * format list and pick the first acceptable one, initializing a matching
 * hwaccel when the candidate is a hardware format. Returns AV_PIX_FMT_NONE
 * on fatal setup failure.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * software-format break, hwaccel NULL check and final return are not
 * visible here).
 */
2815 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2817 InputStream *ist = s->opaque;
2818 const enum AVPixelFormat *p;
     /* candidate list is terminated by -1 (AV_PIX_FMT_NONE) */
2821 for (p = pix_fmts; *p != -1; p++) {
2822 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2823 const HWAccel *hwaccel;
2825 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2828 hwaccel = get_hwaccel(*p);
     /* skip hwaccels that conflict with an already-active one or with the
      * user's explicit -hwaccel selection */
2830 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2831 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2834 ret = hwaccel->init(s);
     /* init failure is only fatal when the user explicitly requested
      * this hwaccel; under -hwaccel auto we keep trying other formats */
2836 if (ist->hwaccel_id == hwaccel->id) {
2837 av_log(NULL, AV_LOG_FATAL,
2838 "%s hwaccel requested for input stream #%d:%d, "
2839 "but cannot be initialized.\n", hwaccel->name,
2840 ist->file_index, ist->st->index);
2841 return AV_PIX_FMT_NONE;
2846 if (ist->hw_frames_ctx) {
2847 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2848 if (!s->hw_frames_ctx)
2849 return AV_PIX_FMT_NONE;
2852 ist->active_hwaccel_id = hwaccel->id;
2853 ist->hwaccel_pix_fmt = *p;
2860 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2862 InputStream *ist = s->opaque;
2864 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2865 return ist->hwaccel_get_buffer(s, frame, flags);
2867 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed) and
 * install the ffmpeg.c callbacks (get_format / get_buffer2). On failure a
 * human-readable message is written into 'error'. Also resets the stream's
 * next_pts/next_dts tracking.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * !codec check, error returns and final return are not visible here).
 */
2870 static int init_input_stream(int ist_index, char *error, int error_len)
2873 InputStream *ist = input_streams[ist_index];
2875 if (ist->decoding_needed) {
2876 AVCodec *codec = ist->dec;
2878 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2879 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2880 return AVERROR(EINVAL);
     /* hook our callbacks into the decoder; opaque carries the InputStream */
2883 ist->dec_ctx->opaque = ist;
2884 ist->dec_ctx->get_format = get_format;
2885 ist->dec_ctx->get_buffer2 = get_buffer;
2886 ist->dec_ctx->thread_safe_callbacks = 1;
2888 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2889 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2890 (ist->decoding_needed & DECODING_FOR_OST)) {
2891 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2892 if (ist->decoding_needed & DECODING_FOR_FILTER)
2893 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2896 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2898 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2899 * audio, and video decoders such as cuvid or mediacodec */
2900 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
     /* default to automatic thread-count selection unless the user set one */
2902 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2903 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2904 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2905 if (ret == AVERROR_EXPERIMENTAL)
2906 abort_codec_experimental(codec, 0);
2908 snprintf(error, error_len,
2909 "Error while opening decoder for input stream "
2911 ist->file_index, ist->st->index, av_err2str(ret));
     /* any decoder option left over in the dict was not consumed → fatal */
2914 assert_avoptions(ist->decoder_opts);
2917 ist->next_pts = AV_NOPTS_VALUE;
2918 ist->next_dts = AV_NOPTS_VALUE;
2923 static InputStream *get_input_stream(OutputStream *ost)
2925 if (ost->source_index >= 0)
2926 return input_streams[ost->source_index];
/*
 * qsort() comparator ordering int64_t values ascending. Implements the
 * sign-of-difference contract as (a > b) - (a < b), which cannot overflow
 * (unlike a direct subtraction of the two values).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
/*
 * Once every stream of an output file is initialized, write the muxer
 * header, optionally print the SDP, and flush any packets that were queued
 * in per-stream muxing queues while waiting for initialization.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (early
 * returns, the AVPacket declaration and the final return are not visible
 * here).
 */
2935 /* open the muxer when all the streams are initialized */
2936 static int check_init_output_file(OutputFile *of, int file_index)
     /* bail out while any stream of this file is still uninitialized */
2940 for (i = 0; i < of->ctx->nb_streams; i++) {
2941 OutputStream *ost = output_streams[of->ost_index + i];
2942 if (!ost->initialized)
2946 of->ctx->interrupt_callback = int_cb;
2948 ret = avformat_write_header(of->ctx, &of->opts);
2950 av_log(NULL, AV_LOG_ERROR,
2951 "Could not write header for output file #%d "
2952 "(incorrect codec parameters ?): %s\n",
2953 file_index, av_err2str(ret));
2956 //assert_avoptions(of->opts);
2957 of->header_written = 1;
2959 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2961 if (sdp_filename || want_sdp)
2964 /* flush the muxing queues */
2965 for (i = 0; i < of->ctx->nb_streams; i++) {
2966 OutputStream *ost = output_streams[of->ost_index + i];
2968 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2969 if (!av_fifo_size(ost->muxing_queue))
2970 ost->mux_timebase = ost->st->time_base;
     /* drain every packet buffered before the header could be written */
2972 while (av_fifo_size(ost->muxing_queue)) {
2974 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2975 write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through each filter in order,
 * then copy the last filter's output parameters back onto the stream.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (error
 * returns and the final return are not visible here).
 */
2982 static int init_output_bsfs(OutputStream *ost)
2987 if (!ost->nb_bitstream_filters)
2990 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2991 ctx = ost->bsf_ctx[i];
     /* each filter's input is the previous filter's output; the first
      * takes the stream's codec parameters */
2993 ret = avcodec_parameters_copy(ctx->par_in,
2994 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2998 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3000 ret = av_bsf_init(ctx);
3002 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3003 ost->bsf_ctx[i]->filter->name);
     /* the chain's final output parameters become the stream's */
3008 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3009 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3013 ost->st->time_base = ctx->time_base_out;
/*
 * Set up an output stream in stream-copy mode: duplicate the input
 * stream's codec parameters (with -c:x copy option overrides applied via
 * enc_ctx), choose a codec tag valid for the target container, transfer
 * timing info, side data and disposition, and prepare a parser context.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (error
 * returns, some declarations and the final return are not visible here).
 */
3018 static int init_output_stream_streamcopy(OutputStream *ost)
3020 OutputFile *of = output_files[ost->file_index];
3021 InputStream *ist = get_input_stream(ost);
3022 AVCodecParameters *par_dst = ost->st->codecpar;
3023 AVCodecParameters *par_src = ost->ref_par;
3026 uint32_t codec_tag = par_dst->codec_tag;
3028 av_assert0(ist && !ost->filter);
     /* route the input parameters through enc_ctx so user options
      * (-ost->encoder_opts) can override individual fields */
3030 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3032 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3034 av_log(NULL, AV_LOG_FATAL,
3035 "Error setting up codec context options.\n");
3038 avcodec_parameters_from_context(par_src, ost->enc_ctx);
     /* keep the source codec tag only when the target container accepts
      * it (or has no tag table of its own) */
3041 unsigned int codec_tag_tmp;
3042 if (!of->ctx->oformat->codec_tag ||
3043 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3044 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3045 codec_tag = par_src->codec_tag;
3048 ret = avcodec_parameters_copy(par_dst, par_src);
3052 par_dst->codec_tag = codec_tag;
3054 if (!ost->frame_rate.num)
3055 ost->frame_rate = ist->framerate;
3056 ost->st->avg_frame_rate = ost->frame_rate;
3058 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3062 // copy timebase while removing common factors
3063 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3064 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3066 // copy estimated duration as a hint to the muxer
3067 if (ost->st->duration <= 0 && ist->st->duration > 0)
3068 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3071 ost->st->disposition = ist->st->disposition;
     /* carry over all stream-level packet side data verbatim */
3073 if (ist->st->nb_side_data) {
3074 for (i = 0; i < ist->st->nb_side_data; i++) {
3075 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3078 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3080 return AVERROR(ENOMEM);
3081 memcpy(dst_data, sd_src->data, sd_src->size);
     /* user forced a rotation: emit a display matrix reflecting it */
3085 if (ost->rotate_overridden) {
3086 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3087 sizeof(int32_t) * 9);
3089 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3092 ost->parser = av_parser_init(par_dst->codec_id);
3093 ost->parser_avctx = avcodec_alloc_context3(NULL);
3094 if (!ost->parser_avctx)
3095 return AVERROR(ENOMEM);
3097 switch (par_dst->codec_type) {
3098 case AVMEDIA_TYPE_AUDIO:
3099 if (audio_volume != 256) {
3100 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
     /* block_align is meaningless for these codecs; clearing it avoids
      * misleading values ending up in the output */
3103 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3104 par_dst->block_align= 0;
3105 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3106 par_dst->block_align= 0;
3108 case AVMEDIA_TYPE_VIDEO:
3109 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3111 av_mul_q(ost->frame_aspect_ratio,
3112 (AVRational){ par_dst->height, par_dst->width });
3113 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3114 "with stream copy may produce invalid files\n");
3116 else if (ist->st->sample_aspect_ratio.num)
3117 sar = ist->st->sample_aspect_ratio;
3119 sar = par_src->sample_aspect_ratio;
3120 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3121 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3122 ost->st->r_frame_rate = ist->st->r_frame_rate;
3126 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on an output stream to identify the
 * encoder used (e.g. "Lavc... libx264"), unless the user already provided
 * one. Under -bitexact (format or codec level) the library version is
 * omitted so output is reproducible.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (early
 * returns and NULL checks are not visible here).
 */
3131 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3133 AVDictionaryEntry *e;
3135 uint8_t *encoder_string;
3136 int encoder_string_len;
3137 int format_flags = 0;
3138 int codec_flags = 0;
     /* respect a user-supplied encoder tag */
3140 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
     /* evaluate -fflags / -flags option strings to detect +bitexact */
3143 e = av_dict_get(of->opts, "fflags", NULL, 0);
3145 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3148 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3150 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3152 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3155 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3158 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3159 encoder_string = av_mallocz(encoder_string_len);
3160 if (!encoder_string)
3163 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3164 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3166 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3167 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
     /* dict takes ownership of encoder_string (DONT_STRDUP_VAL) */
3168 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3169 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification (a comma-separated list of
 * times, where an entry "chapters[+offset]" expands to every chapter start
 * of the output file) into a sorted array of pts values in the encoder
 * time base, stored in ost->forced_kf_pts / ost->forced_kf_count.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (the
 * comma-counting loop body, p advancement and some declarations are not
 * visible here).
 */
3172 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3173 AVCodecContext *avctx)
3176 int n = 1, i, size, index = 0;
     /* count entries: one more than the number of commas */
3179 for (p = kf; *p; p++)
3183 pts = av_malloc_array(size, sizeof(*pts));
3185 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3190 for (i = 0; i < n; i++) {
3191 char *next = strchr(p, ',');
3196 if (!memcmp(p, "chapters", 8)) {
3198 AVFormatContext *avf = output_files[ost->file_index]->ctx;
     /* grow the array to hold one entry per chapter (replacing the one
      * "chapters" entry), guarding against integer overflow */
3201 if (avf->nb_chapters > INT_MAX - size ||
3202 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3204 av_log(NULL, AV_LOG_FATAL,
3205 "Could not allocate forced key frames array.\n");
     /* optional "+offset" after the "chapters" keyword */
3208 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3209 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3211 for (j = 0; j < avf->nb_chapters; j++) {
3212 AVChapter *c = avf->chapters[j];
3213 av_assert1(index < size);
3214 pts[index++] = av_rescale_q(c->start, c->time_base,
3215 avctx->time_base) + t;
3220 t = parse_time_or_die("force_key_frames", p, 1);
3221 av_assert1(index < size);
3222 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
     /* keyframe times are consumed in order during encoding → sort them */
3229 av_assert0(index == size);
3230 qsort(pts, size, sizeof(*pts), compare_int64);
3231 ost->forced_kf_count = size;
3232 ost->forced_kf_pts = pts;
/*
 * Configure the encoder context of an output stream before it is opened:
 * derive frame rate, time base, and per-media-type parameters (sample
 * format/rate/layout for audio; dimensions, SAR, pixel format for video)
 * from the filtergraph sink, and set up forced-keyframe handling.
 *
 * NOTE(review): this excerpt is missing interleaved source lines (some
 * condition headers, braces, case labels and the final return are not
 * visible here).
 */
3235 static int init_output_stream_encode(OutputStream *ost)
3237 InputStream *ist = get_input_stream(ost);
3238 AVCodecContext *enc_ctx = ost->enc_ctx;
3239 AVCodecContext *dec_ctx = NULL;
3240 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3243 set_encoder_id(output_files[ost->file_index], ost);
3245 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3246 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3247 // which have to be filtered out to prevent leaking them to output files.
3248 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3251 ost->st->disposition = ist->st->disposition;
3253 dec_ctx = ist->dec_ctx;
3255 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
     /* if this is the only stream of its media type in the file, mark an
      * audio/video stream as the default one */
3257 for (j = 0; j < oc->nb_streams; j++) {
3258 AVStream *st = oc->streams[j];
3259 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3262 if (j == oc->nb_streams)
3263 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3264 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3265 ost->st->disposition = AV_DISPOSITION_DEFAULT;
     /* pick an output frame rate: filter sink → input -r → input
      * r_frame_rate → hard-coded 25 fps fallback */
3268 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3269 if (!ost->frame_rate.num)
3270 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3271 if (ist && !ost->frame_rate.num)
3272 ost->frame_rate = ist->framerate;
3273 if (ist && !ost->frame_rate.num)
3274 ost->frame_rate = ist->st->r_frame_rate;
3275 if (ist && !ost->frame_rate.num) {
3276 ost->frame_rate = (AVRational){25, 1};
3277 av_log(NULL, AV_LOG_WARNING,
3279 "about the input framerate is available. Falling "
3280 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3281 "if you want a different framerate.\n",
3282 ost->file_index, ost->index);
3284 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
     /* snap to the nearest frame rate the encoder supports (unless the
      * user forced the rate with -force_fps) */
3285 if (ost->enc->supported_framerates && !ost->force_fps) {
3286 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3287 ost->frame_rate = ost->enc->supported_framerates[idx];
3289 // reduce frame rate for mpeg4 to be within the spec limits
3290 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3291 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3292 ost->frame_rate.num, ost->frame_rate.den, 65535);
3296 switch (enc_ctx->codec_type) {
3297 case AVMEDIA_TYPE_AUDIO:
     /* audio parameters come straight from the buffersink */
3298 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3300 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3301 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3302 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3303 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3304 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3305 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3307 case AVMEDIA_TYPE_VIDEO:
3308 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3309 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3310 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3311 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3312 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3313 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3314 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
     /* rescale already-parsed forced keyframe times into the (possibly
      * new) encoder time base */
3316 for (j = 0; j < ost->forced_kf_count; j++)
3317 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3319 enc_ctx->time_base);
3321 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3322 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3323 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3324 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3325 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3326 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3327 if (!strncmp(ost->enc->name, "libx264", 7) &&
3328 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3329 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3330 av_log(NULL, AV_LOG_WARNING,
3331 "No pixel format specified, %s for H.264 encoding chosen.\n"
3332 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3333 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3334 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3335 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3336 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3337 av_log(NULL, AV_LOG_WARNING,
3338 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3339 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3340 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3341 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3343 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3344 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3346 enc_ctx->framerate = ost->frame_rate;
3348 ost->st->avg_frame_rate = ost->frame_rate;
     /* -bits_per_raw_sample override only applies when transcoding
      * actually changes the picture */
3351 enc_ctx->width != dec_ctx->width ||
3352 enc_ctx->height != dec_ctx->height ||
3353 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3354 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3357 if (ost->forced_keyframes) {
     /* "expr:" prefix → evaluate a keyframe expression per frame */
3358 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3359 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3360 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3362 av_log(NULL, AV_LOG_ERROR,
3363 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3366 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3367 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3368 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3369 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3371 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3372 // parse it only for static kf timings
3373 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3374 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3378 case AVMEDIA_TYPE_SUBTITLE:
3379 enc_ctx->time_base = AV_TIME_BASE_Q;
     /* inherit canvas dimensions from the source subtitle stream */
3380 if (!enc_ctx->width) {
3381 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3382 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3385 case AVMEDIA_TYPE_DATA:
3392 ost->mux_timebase = enc_ctx->time_base;
3397 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3401 if (ost->encoding_needed) {
3402 AVCodec *codec = ost->enc;
3403 AVCodecContext *dec = NULL;
3406 ret = init_output_stream_encode(ost);
3410 if ((ist = get_input_stream(ost)))
3412 if (dec && dec->subtitle_header) {
3413 /* ASS code assumes this buffer is null terminated so add extra byte. */
3414 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3415 if (!ost->enc_ctx->subtitle_header)
3416 return AVERROR(ENOMEM);
3417 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3418 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3420 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3421 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3422 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3424 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3425 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3426 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3428 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3429 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3430 av_buffersink_get_format(ost->filter->filter)) {
3431 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3432 if (!ost->enc_ctx->hw_frames_ctx)
3433 return AVERROR(ENOMEM);
3436 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3437 if (ret == AVERROR_EXPERIMENTAL)
3438 abort_codec_experimental(codec, 1);
3439 snprintf(error, error_len,
3440 "Error while opening encoder for output stream #%d:%d - "
3441 "maybe incorrect parameters such as bit_rate, rate, width or height",
3442 ost->file_index, ost->index);
3445 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3446 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3447 av_buffersink_set_frame_size(ost->filter->filter,
3448 ost->enc_ctx->frame_size);
3449 assert_avoptions(ost->encoder_opts);
3450 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3451 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3452 " It takes bits/s as argument, not kbits/s\n");
3454 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3456 av_log(NULL, AV_LOG_FATAL,
3457 "Error initializing the output stream codec context.\n");
3461 * FIXME: ost->st->codec should't be needed here anymore.
3463 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3467 if (ost->enc_ctx->nb_coded_side_data) {
3470 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3471 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3474 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3476 return AVERROR(ENOMEM);
3477 memcpy(dst_data, sd_src->data, sd_src->size);
3482 * Add global input side data. For now this is naive, and copies it
3483 * from the input stream's global side data. All side data should
3484 * really be funneled over AVFrame and libavfilter, then added back to
3485 * packet side data, and then potentially using the first packet for
3490 for (i = 0; i < ist->st->nb_side_data; i++) {
3491 AVPacketSideData *sd = &ist->st->side_data[i];
3492 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3494 return AVERROR(ENOMEM);
3495 memcpy(dst, sd->data, sd->size);
3496 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3497 av_display_rotation_set((uint32_t *)dst, 0);
3501 // copy timebase while removing common factors
3502 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3503 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3505 // copy estimated duration as a hint to the muxer
3506 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3507 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3509 ost->st->codec->codec= ost->enc_ctx->codec;
3510 } else if (ost->stream_copy) {
3511 ret = init_output_stream_streamcopy(ost);
3516 * FIXME: will the codec context used by the parser during streamcopy
3517 * This should go away with the new parser API.
3519 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3524 // parse user provided disposition, and update stream values
3525 if (ost->disposition) {
3526 static const AVOption opts[] = {
3527 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3528 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3529 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3530 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3531 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3532 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3533 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3534 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3535 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3536 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3537 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3538 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3539 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3540 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3543 static const AVClass class = {
3545 .item_name = av_default_item_name,
3547 .version = LIBAVUTIL_VERSION_INT,
3549 const AVClass *pclass = &class;
3551 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3556 /* initialize bitstream filters for the output stream
3557 * needs to be done here, because the codec id for streamcopy is not
3558 * known until now */
3559 ret = init_output_bsfs(ost);
3563 ost->initialized = 1;
3565 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, once per stream index, when the demuxer exposes a stream that was
 * not present when the input was opened; such streams are ignored.
 * NOTE(review): interior lines (opening brace, early return) appear elided
 * in this listing — comments describe only the visible code. */
3572 static void report_new_stream(int input_index, AVPacket *pkt)
3574 InputFile *file = input_files[input_index];
3575 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this index (or a later one) — presumably returns here */
3577 if (pkt->stream_index < file->nb_streams_warn)
3579 av_log(file->ctx, AV_LOG_WARNING,
3580 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3581 av_get_media_type_string(st->codecpar->codec_type),
3582 input_index, pkt->stream_index,
3583 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest index warned about so each new stream is reported once */
3584 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: wire complex-filtergraph
 * outputs to a source stream, init input streams and encoders, discard
 * unused programs, write headers for stream-less outputs, and log the
 * stream mapping. Sets transcode_init_done on success.
 * NOTE(review): many interior lines (braces, continues, error paths) are
 * elided in this listing; comments only assert what the visible code shows. */
3587 static int transcode_init(void)
3589 int ret = 0, i, j, k;
3590 AVFormatContext *oc;
3593 char error[1024] = {0};
/* give complex-filtergraph outputs a nominal source_index (the graph's
 * single input stream) so later per-stream bookkeeping has something to use */
3595 for (i = 0; i < nb_filtergraphs; i++) {
3596 FilterGraph *fg = filtergraphs[i];
3597 for (j = 0; j < fg->nb_outputs; j++) {
3598 OutputFilter *ofilter = fg->outputs[j];
3599 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3601 if (fg->nb_inputs != 1)
3603 for (k = nb_input_streams-1; k >= 0 ; k--)
3604 if (fg->inputs[0]->ist == input_streams[k])
3606 ofilter->ost->source_index = k;
3610 /* init framerate emulation */
3611 for (i = 0; i < nb_input_files; i++) {
3612 InputFile *ifile = input_files[i];
3613 if (ifile->rate_emu)
3614 for (j = 0; j < ifile->nb_streams; j++)
3615 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3618 /* init input streams */
3619 for (i = 0; i < nb_input_streams; i++)
3620 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close every already-opened encoder before bailing out */
3621 for (i = 0; i < nb_output_streams; i++) {
3622 ost = output_streams[i];
3623 avcodec_close(ost->enc_ctx);
3628 /* open each encoder */
3629 for (i = 0; i < nb_output_streams; i++) {
3630 // skip streams fed from filtergraphs until we have a frame for them
3631 if (output_streams[i]->filter)
3634 ret = init_output_stream(output_streams[i], error, sizeof(error));
3639 /* discard unused programs */
3640 for (i = 0; i < nb_input_files; i++) {
3641 InputFile *ifile = input_files[i];
3642 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3643 AVProgram *p = ifile->ctx->programs[j];
3644 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is actually used */
3646 for (k = 0; k < p->nb_stream_indexes; k++)
3647 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3648 discard = AVDISCARD_DEFAULT;
3651 p->discard = discard;
3655 /* write headers for files with no streams */
3656 for (i = 0; i < nb_output_files; i++) {
3657 oc = output_files[i]->ctx;
3658 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3659 ret = check_init_output_file(output_files[i], i);
3666 /* dump the stream mapping */
3667 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3668 for (i = 0; i < nb_input_streams; i++) {
3669 ist = input_streams[i];
3671 for (j = 0; j < ist->nb_filters; j++) {
3672 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3673 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3674 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3675 ist->filters[j]->name);
3676 if (nb_filtergraphs > 1)
3677 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3678 av_log(NULL, AV_LOG_INFO, "\n");
3683 for (i = 0; i < nb_output_streams; i++) {
3684 ost = output_streams[i];
3686 if (ost->attachment_filename) {
3687 /* an attached file */
3688 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3689 ost->attachment_filename, ost->file_index, ost->index);
3693 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3694 /* output from a complex graph */
3695 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3696 if (nb_filtergraphs > 1)
3697 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3699 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3700 ost->index, ost->enc ? ost->enc->name : "?");
3704 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3705 input_streams[ost->source_index]->file_index,
3706 input_streams[ost->source_index]->st->index,
3709 if (ost->sync_ist != input_streams[ost->source_index])
3710 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3711 ost->sync_ist->file_index,
3712 ost->sync_ist->st->index);
3713 if (ost->stream_copy)
3714 av_log(NULL, AV_LOG_INFO, " (copy)");
/* when transcoding, report both the container codec name and the actual
 * de/encoder implementation; "native" means they coincide */
3716 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3717 const AVCodec *out_codec = ost->enc;
3718 const char *decoder_name = "?";
3719 const char *in_codec_name = "?";
3720 const char *encoder_name = "?";
3721 const char *out_codec_name = "?";
3722 const AVCodecDescriptor *desc;
3725 decoder_name = in_codec->name;
3726 desc = avcodec_descriptor_get(in_codec->id);
3728 in_codec_name = desc->name;
3729 if (!strcmp(decoder_name, in_codec_name))
3730 decoder_name = "native";
3734 encoder_name = out_codec->name;
3735 desc = avcodec_descriptor_get(out_codec->id);
3737 out_codec_name = desc->name;
3738 if (!strcmp(encoder_name, out_codec_name))
3739 encoder_name = "native";
3742 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3743 in_codec_name, decoder_name,
3744 out_codec_name, encoder_name);
3746 av_log(NULL, AV_LOG_INFO, "\n");
3750 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* signal the rest of the program (e.g. signal handlers) that init finished */
3754 atomic_store(&transcode_init_done, 1);
3759 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* NOTE(review): interior lines (continue statements, the return paths) are
 * elided in this listing; comments describe only the visible logic. */
3760 static int need_output(void)
3764 for (i = 0; i < nb_output_streams; i++) {
3765 OutputStream *ost = output_streams[i];
3766 OutputFile *of = output_files[ost->file_index];
3767 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* stream already finished, or its file hit the -fs size limit */
3769 if (ost->finished ||
3770 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* frame-count limit reached: close every stream of this output file */
3772 if (ost->frame_number >= ost->max_frames) {
3774 for (j = 0; j < of->ctx->nb_streams; j++)
3775 close_output_stream(output_streams[of->ost_index + j]);
3786 * Select the output stream to process.
3788 * @return selected output stream, or NULL if none available
/* Picks the non-finished output stream with the smallest current DTS
 * (rescaled to a common timebase), so muxing stays roughly interleaved.
 * NOTE(review): the rescale target timebase line and the closing of the
 * loop are elided in this listing. */
3790 static OutputStream *choose_output(void)
3793 int64_t opts_min = INT64_MAX;
3794 OutputStream *ost_min = NULL;
3796 for (i = 0; i < nb_output_streams; i++) {
3797 OutputStream *ost = output_streams[i];
/* streams with no DTS yet sort first (INT64_MIN) so they get primed */
3798 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3799 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3801 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3802 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3804 if (!ost->initialized && !ost->inputs_done)
3807 if (!ost->finished && opts < opts_min) {
/* an "unavailable" stream wins the min but yields NULL (try again later) */
3809 ost_min = ost->unavailable ? NULL : ost;
/* Enable/disable terminal echo on stdin (fd 0); used around interactive
 * command entry. NOTE(review): the termios declaration and #if guard lines
 * are elided in this listing. */
3815 static void set_tty_echo(int on)
3819 if (tcgetattr(0, &tty) == 0) {
3820 if (on) tty.c_lflag |= ECHO;
3821 else tty.c_lflag &= ~ECHO;
3822 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms) and act on single-key commands:
 * verbosity (+/-), QP histogram (s), filter commands (c/C), debug cycling
 * (d/D), help (?/h). Returns AVERROR_EXIT to request termination.
 * NOTE(review): many interior lines (key = read_key(), 'q' handling, braces)
 * are elided in this listing; comments only cover the visible code. */
3827 static int check_keyboard_interaction(int64_t cur_time)
3830 static int64_t last_time;
3831 if (received_nb_signals)
3832 return AVERROR_EXIT;
3833 /* read_key() returns 0 on EOF */
3834 if(cur_time - last_time >= 100000 && !run_as_daemon){
3836 last_time = cur_time;
3840 return AVERROR_EXIT;
3841 if (key == '+') av_log_set_level(av_log_get_level()+10);
3842 if (key == '-') av_log_set_level(av_log_get_level()-10);
3843 if (key == 's') qp_hist ^= 1;
3846 do_hex_dump = do_pkt_dump = 0;
3847 } else if(do_pkt_dump){
3851 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read an interactive "<target> <time> <command> [<arg>]" line
 * and send ('c', first match only) or queue ('C') it to the filtergraphs */
3853 if (key == 'c' || key == 'C'){
3854 char buf[4096], target[64], command[256], arg[256] = {0};
3857 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3860 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3865 fprintf(stderr, "\n");
3867 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3868 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3869 target, time, command, arg);
3870 for (i = 0; i < nb_filtergraphs; i++) {
3871 FilterGraph *fg = filtergraphs[i];
3874 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3875 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3876 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3877 } else if (key == 'c') {
3878 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3879 ret = AVERROR_PATCHWELCOME;
3881 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3883 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3888 av_log(NULL, AV_LOG_ERROR,
3889 "Parse error, at least 3 arguments were expected, "
3890 "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through debug modes; 'D' presumably prompts for a value */
3893 if (key == 'd' || key == 'D'){
3896 debug = input_streams[0]->st->codec->debug<<1;
3897 if(!debug) debug = 1;
3898 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3905 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3910 fprintf(stderr, "\n");
3911 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3912 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to every decoder and encoder context */
3914 for(i=0;i<nb_input_streams;i++) {
3915 input_streams[i]->st->codec->debug = debug;
3917 for(i=0;i<nb_output_streams;i++) {
3918 OutputStream *ost = output_streams[i];
3919 ost->enc_ctx->debug = debug;
3921 if(debug) av_log_set_level(AV_LOG_DEBUG);
3922 fprintf(stderr,"debug=%d\n", debug);
3925 fprintf(stderr, "key function\n"
3926 "? show this help\n"
3927 "+ increase verbosity\n"
3928 "- decrease verbosity\n"
3929 "c Send command to first matching filter supporting it\n"
3930 "C Send/Queue command to all matching filters\n"
3931 "D cycle through available debug modes\n"
3932 "h dump packets/hex press to cycle through the 3 states\n"
3934 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread via a thread message queue. On read
 * error/EOF, the error is recorded on the queue's receive side.
 * NOTE(review): interior lines (the loop header, InputFile *f = arg, some
 * braces) are elided in this listing. */
3941 static void *input_thread(void *arg)
/* non-blocking sends are used for non-seekable ("live") inputs */
3944 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3949 ret = av_read_frame(f->ctx, &pkt);
3951 if (ret == AVERROR(EAGAIN)) {
3956 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3959 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full on a non-blocking send: warn, then retry (presumably blocking) */
3960 if (flags && ret == AVERROR(EAGAIN)) {
3962 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3963 av_log(f->ctx, AV_LOG_WARNING,
3964 "Thread message queue blocking; consider raising the "
3965 "thread_queue_size option (current value: %d)\n",
3966 f->thread_queue_size);
3969 if (ret != AVERROR_EOF)
3970 av_log(f->ctx, AV_LOG_ERROR,
3971 "Unable to send packet to main thread: %s\n",
/* send failed: drop the packet and propagate the error to the receiver */
3973 av_packet_unref(&pkt);
3974 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down all demuxer threads: signal EOF to senders, drain and unref any
 * queued packets, join each thread, and free the message queues.
 * NOTE(review): interior lines (declarations, braces) are elided here. */
3982 static void free_input_threads(void)
3986 for (i = 0; i < nb_input_files; i++) {
3987 InputFile *f = input_files[i];
3990 if (!f || !f->in_thread_queue)
3992 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so the input thread can exit its send loop */
3993 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3994 av_packet_unref(&pkt);
3996 pthread_join(f->thread, NULL);
3998 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than one
 * input). Allocates each file's packet message queue and spawns the thread.
 * NOTE(review): interior lines (return 0 paths, braces) are elided here. */
4002 static int init_input_threads(void)
/* single input: threading is unnecessary, presumably returns early */
4006 if (nb_input_files == 1)
4009 for (i = 0; i < nb_input_files; i++) {
4010 InputFile *f = input_files[i];
/* non-seekable real inputs (but not lavfi) must not block the reader */
4012 if (f->ctx->pb ? !f->ctx->pb->seekable :
4013 strcmp(f->ctx->iformat->name, "lavfi"))
4014 f->non_blocking = 1;
4015 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4016 f->thread_queue_size, sizeof(AVPacket));
4020 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4021 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4022 av_thread_message_queue_free(&f->in_thread_queue);
4023 return AVERROR(ret);
/* Receive one packet from the file's demuxer-thread queue; non-blocking
 * when the file has already hit EAGAIN. NOTE(review): the condition line
 * between 4031 and 4033 is elided in this listing. */
4029 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4031 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4033 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Read the next packet from an input file, honouring -re rate emulation
 * (return EAGAIN while any stream is ahead of wall-clock time). Uses the
 * threaded path when multiple inputs exist, else reads directly.
 * NOTE(review): the rate_emu guard and pts>now comparison lines are elided. */
4037 static int get_input_packet(InputFile *f, AVPacket *pkt)
4041 for (i = 0; i < f->nb_streams; i++) {
4042 InputStream *ist = input_streams[f->ist_index + i];
/* stream DTS in microseconds vs. elapsed wall-clock since start */
4043 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4044 int64_t now = av_gettime_relative() - ist->start;
4046 return AVERROR(EAGAIN);
4051 if (nb_input_files > 1)
4052 return get_input_packet_mt(f, pkt);
4054 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input recently returned EAGAIN). NOTE(review): the return
 * lines and braces are elided in this listing. */
4057 static int got_eagain(void)
4060 for (i = 0; i < nb_output_streams; i++)
4061 if (output_streams[i]->unavailable)
/* Clear the per-file eagain flags and per-stream unavailable flags so the
 * next transcode_step() retries every input/output. */
4066 static void reset_eagain(void)
4069 for (i = 0; i < nb_input_files; i++)
4070 input_files[i]->eagain = 0;
4071 for (i = 0; i < nb_output_streams; i++)
4072 output_streams[i]->unavailable = 0;
4075 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* NOTE(review): the branch that actually stores tmp into *duration is elided
 * in this listing; only the comparison via av_compare_ts is visible. */
4076 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4077 AVRational time_base)
4083 return tmp_time_base;
/* compare the two durations in their own time bases without rescaling loss */
4086 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4089 return tmp_time_base;
/* For -stream_loop: seek the input back to its start, flush decoders, and
 * accumulate the file's total duration (max over streams) so timestamps of
 * the next iteration can be offset. Audio stream sample counts take
 * precedence over video frame durations for the last-frame length.
 * NOTE(review): several interior lines (continues, loop-count decrement,
 * return) are elided in this listing. */
4095 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4098 AVCodecContext *avctx;
4099 int i, ret, has_audio = 0;
4100 int64_t duration = 0;
4102 ret = av_seek_frame(is, -1, is->start_time, 0);
/* first pass: flush all active decoders and detect usable audio */
4106 for (i = 0; i < ifile->nb_streams; i++) {
4107 ist = input_streams[ifile->ist_index + i];
4108 avctx = ist->dec_ctx;
4111 if (ist->decoding_needed) {
4112 process_input_packet(ist, NULL, 1);
4113 avcodec_flush_buffers(avctx);
4116 /* duration is the length of the last frame in a stream
4117 * when audio stream is present we don't care about
4118 * last video frame length because it's not defined exactly */
4119 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* second pass: compute each stream's duration incl. an estimated last frame */
4123 for (i = 0; i < ifile->nb_streams; i++) {
4124 ist = input_streams[ifile->ist_index + i];
4125 avctx = ist->dec_ctx;
4128 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4129 AVRational sample_rate = {1, avctx->sample_rate};
4131 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4135 if (ist->framerate.num) {
4136 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4137 } else if (ist->st->avg_frame_rate.num) {
4138 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4139 } else duration = 1;
4141 if (!ifile->duration)
4142 ifile->time_base = ist->st->time_base;
4143 /* the total duration of the stream, max_pts - min_pts is
4144 * the duration of the stream without the last frame */
4145 duration += ist->max_pts - ist->min_pts;
4146 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* finite loop count: presumably decremented here (line elided) */
4150 if (ifile->loop > 0)
/* Read and process one packet from input file `file_index`:
 * handles looping (-stream_loop), EOF flushing, timestamp wrap correction,
 * -itsoffset/-itsscale application, discontinuity detection, and finally
 * hands the packet to process_input_packet().
 * NOTE(review): numerous interior lines (declarations, continues, braces,
 * goto labels) are elided in this listing; comments only assert what the
 * visible code shows. */
4158 * - 0 -- one packet was read and processed
4159 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4160 * this function should be called again
4161 * - AVERROR_EOF -- this function should not be called again
4163 static int process_input(int file_index)
4165 InputFile *ifile = input_files[file_index];
4166 AVFormatContext *is;
4174 ret = get_input_packet(ifile, &pkt);
4176 if (ret == AVERROR(EAGAIN)) {
/* read failed/EOF but looping requested: seek back and try once more */
4180 if (ret < 0 && ifile->loop) {
4181 if ((ret = seek_to_start(ifile, is)) < 0)
4183 ret = get_input_packet(ifile, &pkt);
4184 if (ret == AVERROR(EAGAIN)) {
4190 if (ret != AVERROR_EOF) {
4191 print_error(is->filename, ret);
/* genuine EOF: flush each active decoder and finish non-lavfi outputs */
4196 for (i = 0; i < ifile->nb_streams; i++) {
4197 ist = input_streams[ifile->ist_index + i];
4198 if (ist->decoding_needed) {
4199 ret = process_input_packet(ist, NULL, 0);
4204 /* mark all outputs that don't go through lavfi as finished */
4205 for (j = 0; j < nb_output_streams; j++) {
4206 OutputStream *ost = output_streams[j];
4208 if (ost->source_index == ifile->ist_index + i &&
4209 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4210 finish_output_stream(ost);
4214 ifile->eof_reached = 1;
4215 return AVERROR(EAGAIN);
4221 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4222 is->streams[pkt.stream_index]);
4224 /* the following test is needed in case new streams appear
4225 dynamically in stream : we ignore them */
4226 if (pkt.stream_index >= ifile->nb_streams) {
4227 report_new_stream(file_index, &pkt);
4228 goto discard_packet;
4231 ist = input_streams[ifile->ist_index + pkt.stream_index];
4233 ist->data_size += pkt.size;
4237 goto discard_packet;
4239 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4240 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4245 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4246 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4247 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4248 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4249 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4250 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4251 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4252 av_ts2str(input_files[ist->file_index]->ts_offset),
4253 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with <64-bit pts counters */
4256 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4257 int64_t stime, stime2;
4258 // Correcting starttime based on the enabled streams
4259 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4260 // so we instead do it here as part of discontinuity handling
4261 if ( ist->next_dts == AV_NOPTS_VALUE
4262 && ifile->ts_offset == -is->start_time
4263 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4264 int64_t new_start_time = INT64_MAX;
4265 for (i=0; i<is->nb_streams; i++) {
4266 AVStream *st = is->streams[i];
4267 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4269 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4271 if (new_start_time > is->start_time) {
4272 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4273 ifile->ts_offset = -new_start_time;
4277 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4278 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4279 ist->wrap_correction_done = 1;
4281 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4282 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4283 ist->wrap_correction_done = 0;
4285 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4286 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4287 ist->wrap_correction_done = 0;
4291 /* add the stream-global side data to the first packet */
4292 if (ist->nb_packets == 1) {
4293 for (i = 0; i < ist->st->nb_side_data; i++) {
4294 AVPacketSideData *src_sd = &ist->st->side_data[i];
4297 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4300 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4303 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4307 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the accumulated file timestamp offset, then per-stream ts scale */
4311 if (pkt.dts != AV_NOPTS_VALUE)
4312 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4313 if (pkt.pts != AV_NOPTS_VALUE)
4314 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4316 if (pkt.pts != AV_NOPTS_VALUE)
4317 pkt.pts *= ist->ts_scale;
4318 if (pkt.dts != AV_NOPTS_VALUE)
4319 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen timestamp */
4321 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4322 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4323 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4324 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4325 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4326 int64_t delta = pkt_dts - ifile->last_ts;
4327 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4328 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4329 ifile->ts_offset -= delta;
4330 av_log(NULL, AV_LOG_DEBUG,
4331 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4332 delta, ifile->ts_offset);
4333 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4334 if (pkt.pts != AV_NOPTS_VALUE)
4335 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* when looping, shift timestamps by the accumulated file duration */
4339 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4340 if (pkt.pts != AV_NOPTS_VALUE) {
4341 pkt.pts += duration;
4342 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4343 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4346 if (pkt.dts != AV_NOPTS_VALUE)
4347 pkt.dts += duration;
/* intra-stream discontinuity check against the predicted next DTS */
4349 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4350 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4351 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4352 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4354 int64_t delta = pkt_dts - ist->next_dts;
4355 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4356 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4357 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4358 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4359 ifile->ts_offset -= delta;
4360 av_log(NULL, AV_LOG_DEBUG,
4361 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4362 delta, ifile->ts_offset);
4363 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4364 if (pkt.pts != AV_NOPTS_VALUE)
4365 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-TS_DISCONT formats: drop wildly out-of-range DTS/PTS instead */
4368 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4369 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4370 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4371 pkt.dts = AV_NOPTS_VALUE;
4373 if (pkt.pts != AV_NOPTS_VALUE){
4374 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4375 delta = pkt_pts - ist->next_dts;
4376 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4377 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4378 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4379 pkt.pts = AV_NOPTS_VALUE;
4385 if (pkt.dts != AV_NOPTS_VALUE)
4386 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4389 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4390 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4391 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4392 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4393 av_ts2str(input_files[ist->file_index]->ts_offset),
4394 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4397 sub2video_heartbeat(ist, pkt.pts);
4399 process_input_packet(ist, &pkt, 0);
4402 av_packet_unref(&pkt);
4408 * Perform a step of transcoding for the specified filter graph.
4410 * @param[in] graph filter graph to consider
4411 * @param[out] best_ist input stream where a frame would allow to continue
4412 * @return 0 for success, <0 for error
/* Requests a frame from the graph's oldest output; on EAGAIN, chooses the
 * input with the most failed buffersrc requests as the stream to feed next.
 * NOTE(review): interior lines (opening brace, *best_ist assignments,
 * the `ist = ifilter->ist` line) are elided in this listing. */
4414 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4417 int nb_requests, nb_requests_max = 0;
4418 InputFilter *ifilter;
4422 ret = avfilter_graph_request_oldest(graph->graph);
4424 return reap_filters(0);
/* graph fully drained: reap remaining frames and close its outputs */
4426 if (ret == AVERROR_EOF) {
4427 ret = reap_filters(1);
4428 for (i = 0; i < graph->nb_outputs; i++)
4429 close_output_stream(graph->outputs[i]->ost);
4432 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the starved input that most wants data */
4435 for (i = 0; i < graph->nb_inputs; i++) {
4436 ifilter = graph->inputs[i];
4438 if (input_files[ist->file_index]->eagain ||
4439 input_files[ist->file_index]->eof_reached)
4441 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4442 if (nb_requests > nb_requests_max) {
4443 nb_requests_max = nb_requests;
/* no feedable input found: mark the graph's outputs unavailable for now */
4449 for (i = 0; i < graph->nb_outputs; i++)
4450 graph->outputs[i]->ost->unavailable = 1;
4456 * Run a single step of transcoding.
4458 * @return 0 for success, <0 for error
/* Pick the neediest output stream, (re)configure its filtergraph if all
 * input formats are known, determine which input to read, read/process one
 * packet, then reap filtered frames.
 * NOTE(review): interior lines (braces, `if (!ost)` handling, goto targets)
 * are elided in this listing. */
4460 static int transcode_step(void)
4463 InputStream *ist = NULL;
4466 ost = choose_output();
4473 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* graph exists but is not configured yet: configure once formats are known */
4477 if (ost->filter && !ost->filter->graph->graph) {
4478 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4479 ret = configure_filtergraph(ost->filter->graph);
4481 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4487 if (ost->filter && ost->filter->graph->graph) {
4488 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4492 } else if (ost->filter) {
/* unconfigured graph: look for an input that can still produce a frame */
4494 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4495 InputFilter *ifilter = ost->filter->graph->inputs[i];
4496 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4502 ost->inputs_done = 1;
4506 av_assert0(ost->source_index >= 0);
4507 ist = input_streams[ost->source_index];
4510 ret = process_input(ist->file_index);
4511 if (ret == AVERROR(EAGAIN)) {
4512 if (input_files[ist->file_index]->eagain)
4513 ost->unavailable = 1;
4518 return ret == AVERROR_EOF ? 0 : ret;
4520 return reap_filters(0);
4524 * The following code is the main loop of the file converter
/* Top-level driver: transcode_init(), then loop transcode_step() until no
 * output is needed or a signal arrives; afterwards flush decoders, write
 * trailers, print the final report, and close encoders/decoders.
 * NOTE(review): interior lines (braces, goto fail paths, loop closers) are
 * elided in this listing; comments describe only the visible code. */
4526 static int transcode(void)
4529 AVFormatContext *os;
4532 int64_t timer_start;
4533 int64_t total_packets_written = 0;
4535 ret = transcode_init();
4539 if (stdin_interaction) {
4540 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4543 timer_start = av_gettime_relative();
4546 if ((ret = init_input_threads()) < 0)
4550 while (!received_sigterm) {
4551 int64_t cur_time= av_gettime_relative();
4553 /* if 'q' pressed, exits */
4554 if (stdin_interaction)
4555 if (check_keyboard_interaction(cur_time) < 0)
4558 /* check if there's any stream where output is still needed */
4559 if (!need_output()) {
4560 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4564 ret = transcode_step();
4565 if (ret < 0 && ret != AVERROR_EOF) {
4567 av_strerror(ret, errbuf, sizeof(errbuf));
4569 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4573 /* dump report by using the output first video and audio streams */
4574 print_report(0, timer_start, cur_time);
4577 free_input_threads();
4580 /* at the end of stream, we must flush the decoder buffers */
4581 for (i = 0; i < nb_input_streams; i++) {
4582 ist = input_streams[i];
4583 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4584 process_input_packet(ist, NULL, 0);
4591 /* write the trailer if needed and close file */
4592 for (i = 0; i < nb_output_files; i++) {
4593 os = output_files[i]->ctx;
4594 if (!output_files[i]->header_written) {
4595 av_log(NULL, AV_LOG_ERROR,
4596 "Nothing was written into output file %d (%s), because "
4597 "at least one of its streams received no packets.\n",
4601 if ((ret = av_write_trailer(os)) < 0) {
4602 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4608 /* dump report by using the first video and audio streams */
4609 print_report(1, timer_start, av_gettime_relative());
4611 /* close each encoder */
4612 for (i = 0; i < nb_output_streams; i++) {
4613 ost = output_streams[i];
4614 if (ost->encoding_needed) {
4615 av_freep(&ost->enc_ctx->stats_in);
4617 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was ever muxed */
4620 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4621 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4625 /* close each decoder */
4626 for (i = 0; i < nb_input_streams; i++) {
4627 ist = input_streams[i];
4628 if (ist->decoding_needed) {
4629 avcodec_close(ist->dec_ctx);
4630 if (ist->hwaccel_uninit)
4631 ist->hwaccel_uninit(ist->dec_ctx);
4635 av_buffer_unref(&hw_device_ctx);
4642 free_input_threads();
4645 if (output_streams) {
4646 for (i = 0; i < nb_output_streams; i++) {
4647 ost = output_streams[i];
4650 if (fclose(ost->logfile))
4651 av_log(NULL, AV_LOG_ERROR,
4652 "Error closing logfile, loss of information possible: %s\n",
4653 av_err2str(AVERROR(errno)));
4654 ost->logfile = NULL;
4656 av_freep(&ost->forced_kf_pts);
4657 av_freep(&ost->apad);
4658 av_freep(&ost->disposition);
4659 av_dict_free(&ost->encoder_opts);
4660 av_dict_free(&ost->sws_dict);
4661 av_dict_free(&ost->swr_opts);
4662 av_dict_free(&ost->resample_opts);
/* Return this process's consumed user CPU time in microseconds, using
 * getrusage (POSIX) or GetProcessTimes (Windows), falling back to
 * wall-clock time. NOTE(review): the #if HAVE_GETRUSAGE guard, HANDLE
 * declaration, and #endif lines are elided in this listing. */
4670 static int64_t getutime(void)
4673 struct rusage rusage;
4675 getrusage(RUSAGE_SELF, &rusage);
4676 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4677 #elif HAVE_GETPROCESSTIMES
4679 FILETIME c, e, k, u;
4680 proc = GetCurrentProcess();
4681 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; /10 converts to microseconds */
4682 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4684 return av_gettime_relative();
/* Return the peak memory usage of this process in bytes, for the final
 * -benchmark report; platform-selected at preprocessor time (the #else
 * fallback and closing #endif/brace are elided from this view). */
4688 static int64_t getmaxrss(void)
4690 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4691 struct rusage rusage;
4692 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is the peak resident set; *1024 scales it to bytes
 * (NOTE(review): assumes the Linux convention of kilobytes — on some BSDs
 * the unit differs; confirm against the configure checks). */
4693 return (int64_t)rusage.ru_maxrss * 1024;
4694 #elif HAVE_GETPROCESSMEMORYINFO
4696 PROCESS_MEMORY_COUNTERS memcounters;
4697 proc = GetCurrentProcess();
/* cb must be set to the structure size before the call, per the Win32 API. */
4698 memcounters.cb = sizeof(memcounters);
4699 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* Reports peak committed (pagefile-backed) memory, already in bytes. */
4700 return memcounters.PeakPagefileUsage;
/* No-op log callback: installed (see main's "-d" handling below) to
 * suppress all av_log output when running detached.  Body elided from
 * this view; the signature matches av_log_set_callback(). */
4706 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging, register all components, parse the
 * command line, then run the transcode pipeline and report statistics.
 * NOTE(review): several interior lines (local declarations of i/ret/ti,
 * braces, exit_program() calls on the error paths, and the closing brace)
 * are elided from this view; only comments are added. */
4710 int main(int argc, char **argv)
/* Ensure cleanup (ffmpeg_cleanup) runs on every exit_program() path. */
4717 register_exit(ffmpeg_cleanup);
4719 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
/* Collapse repeated identical log lines; honor -loglevel before any output. */
4721 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4722 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: run silently (daemon-style) by swallowing
 * all log output through the null callback. */
4724 if(argc>1 && !strcmp(argv[1], "-d")){
4726 av_log_set_callback(log_callback_null);
/* Register every codec, device, filter, then bring up networking, before
 * any option that may reference them is parsed. */
4731 avcodec_register_all();
4733 avdevice_register_all();
4735 avfilter_register_all();
4737 avformat_network_init();
4739 show_banner(argc, argv, options);
4741 /* parse options and open all input/output files */
4742 ret = ffmpeg_parse_options(argc, argv);
/* No inputs and no outputs at all: print help hint and bail out. */
4746 if (nb_output_files <= 0 && nb_input_files == 0) {
4748 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4752 /* file converter / grab */
4753 if (nb_output_files <= 0) {
4754 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Inputs are optional (e.g. lavfi-only graphs); the old check is kept
 * commented out deliberately. */
4758 // if (nb_input_files == 0) {
4759 //     av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* An SDP dump is only wanted when every output muxer is "rtp"; any
 * non-rtp output cancels it (clause elided from this view). */
4763 for (i = 0; i < nb_output_files; i++) {
4764 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Time the whole transcode run in user-CPU microseconds (see getutime). */
4768 current_time = ti = getutime();
4769 if (transcode() < 0)
4771 ti = getutime() - ti;
4773 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4775 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4776 decode_error_stat[0], decode_error_stat[1]);
/* Fail (path elided) when the decode-error ratio exceeds -max_error_rate. */
4777 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals interruption by the user (Ctrl-C etc.); exit_program never
 * returns, so the trailing return only silences the compiler. */
4780 exit_program(received_nb_signals ? 255 : main_return_code);
4781 return main_return_code;