2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
115 static FILE *vstats_file;
117 const char *const forced_keyframes_const_names[] = {
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
142 static uint8_t *subtitle_out;
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
/* sub2video_get_blank_frame (fragment): reset ist->sub2video.frame to a blank
 * RGB32 canvas sized from the decoder context (falling back to the configured
 * sub2video w/h) and allocate a buffer for it.
 * NOTE(review): SOURCE is an elided excerpt — the error return and function
 * close are missing from view; code left byte-identical. */
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
/* prefer the decoder's dimensions when known; 0 means "not yet known" here */
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* zero-fill: RGB32 all-zero is fully transparent black */
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* sub2video_copy_rect (fragment): blit one paletted bitmap subtitle rectangle
 * onto the RGB32 canvas, expanding palette indices through r->data[1].
 * Rejects non-bitmap rectangles and rectangles that overflow the canvas.
 * NOTE(review): elided excerpt — early returns and loop closes are missing
 * from view; code left byte-identical. */
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
/* advance dst to the rectangle's top-left corner (4 bytes per RGB32 pixel) */
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
/* sub2video_push_ref (fragment): stamp the sub2video frame with pts, record it
 * as last_pts, and push a kept reference into every filter fed by this input
 * stream. KEEP_REF lets the same frame be re-sent on later heartbeats. */
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
/* sub2video_update (fragment): render an AVSubtitle onto a fresh blank canvas
 * and push it into the filter graph. pts/end_pts are derived from the
 * subtitle's display window rescaled to the stream time base; a NULL sub
 * (heartbeat/flush path, per elided branch) reuses the stored end_pts.
 * NOTE(review): elided excerpt — the NULL-sub branch and returns are partly
 * missing from view; code left byte-identical. */
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
/* start/end_display_time are in ms relative to sub->pts (AV_TIME_BASE units) */
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
/* sub2video_heartbeat (fragment): when any frame is read from a file, re-send
 * the current sub2video frame to sibling sub2video streams of the same file,
 * so filters (e.g. overlay) waiting on a subtitle input do not stall. */
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could be accumulating in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
278 if (!ist2->sub2video.frame)
280 /* subtitles seem to be usually muxed ahead of other streams;
281 if not, subtracting a larger time here is necessary */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
/* current subtitle window expired (or no canvas yet): refresh via NULL sub */
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
/* sub2video_flush (fragment): emit a final blank update if a subtitle window
 * is still open, then send EOF (NULL frame) to each attached buffersrc. */
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
/* term_exit_sigsafe (fragment): async-signal-safe terminal restore — put the
 * tty back to its saved mode (oldtty). The trailing empty av_log forces a
 * flush/reset of the log state. Guarding condition lines are elided from view. */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
/* sigterm_handler (fragment): record the received signal and count arrivals;
 * after more than 3 signals, write() a message (async-signal-safe) and
 * hard-exit (exit call elided from view). Return type line is also elided. */
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
/* CtrlHandler (Windows-only fragment): map console control events to the
 * POSIX-style sigterm_handler. For close/logoff/shutdown the process is hard
 * terminated on return, so it stalls until the main thread finishes cleanup.
 * NOTE(review): switch open, returns, and #endif are elided from view. */
341 #if HAVE_SETCONSOLECTRLHANDLER
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to
359 to try and let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* term_init body (fragment — the function header is elided from view):
 * put the controlling tty into raw-ish mode for interactive key reading
 * (only when stdin interaction is wanted and not daemonized), then install
 * signal handlers for graceful shutdown. */
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control, keep output post-processing */
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
/* no echo, no canonical (line-buffered) mode */
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read_key (fragment): poll stdin for one keypress without blocking —
 * select() on POSIX, PeekNamedPipe/console probing on Windows. Most of the
 * body (fd_set setup, reads, returns, #endif) is elided from view. */
407 /* read a key without blocking */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles => stdin is a pipe */
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
/* decode_interrupt_cb: AVIOInterruptCB callback — interrupt blocking I/O
 * once a signal has arrived (before init: any signal; after init: more than
 * one, since transcode_init_done is then 1). */
459 static int decode_interrupt_cb(void *ctx)
461 return received_nb_signals > atomic_load(&transcode_init_done);
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* ffmpeg_cleanup (fragment): global teardown run at exit — drain and free
 * filtergraphs (including queued frames and sub2video subtitle queues),
 * output files/streams, input files/streams, the vstats file, the global
 * stream/file arrays, and deinit the network; finally report why we exited.
 * NOTE(review): elided excerpt — several loop/brace closes and conditionals
 * are missing from view; code left byte-identical. */
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
/* drain any frames still queued for this filtergraph input */
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
/* close output files (avio close elided from view) before freeing contexts */
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
560 if (ost->muxing_queue) {
/* unref packets still waiting to be muxed, then free the queue itself */
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
/* fclose reports delayed write errors — losing vstats data is worth a log */
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* remove_avoptions (fragment): delete from *a every key present in b —
 * used to strip options that were consumed by another component. */
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* assert_avoptions (fragment): if any option in m was left unconsumed, log it
 * as fatal (the exit call is elided from view). */
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* abort_codec_experimental (fragment): only the signature is visible here;
 * body (presumably an exit after the experimental-codec diagnostic — TODO
 * confirm against full source) is elided from view. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* update_benchmark (fragment): when -benchmark_all is set, log the user-time
 * delta since the previous call under the printf-style label fmt. A NULL fmt
 * (per callers below) only resets the reference time. */
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* close_all_output_streams (fragment): mark every output stream finished —
 * this_stream flags for ost itself, others flags for the rest. */
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* write_packet (fragment): hand one packet to the muxer for stream ost —
 * enforces -frames limits, buffers packets in the muxing queue until the
 * muxer header is written, rescales timestamps to the stream time base,
 * repairs non-monotonic/invalid DTS, collects stats, and finally calls
 * av_interleaved_write_frame(). The packet is consumed.
 * NOTE(review): elided excerpt — several returns, brace closes, and error
 * branches are missing from view; code left byte-identical. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
696 if (!av_fifo_space(ost->muxing_queue)) {
/* grow geometrically but never past max_muxing_queue_size */
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
/* drop timestamps entirely for -vsync drop / negative async */
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
/* replace dts by the median of pts, dts and last_mux_dts+1 (sum minus min minus max) */
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonous DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
774 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
/* close_output_stream (fragment): flag the encoder side of ost as finished
 * and, when applicable, shrink the file's recording_time so the remaining
 * streams stop at the same point. */
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
818 * Send a single packet to the output, applying any bitstream filters
819 * associated with the output stream. This may result in any number
820 * of packets actually being written, depending on what bitstream
821 * filters are applied. The supplied packet is consumed and will be
822 * blank (as if newly-allocated) when this function returns.
824 * If eof is set, instead indicate EOF to all bitstream filters and
825 * therefore flush any delayed packets to the output. A blank packet
826 * must be supplied in this case.
/* NOTE(review): elided excerpt — the BSF chain loop structure (idx handling,
 * brace closes) is partly missing from view; code left byte-identical. */
828 static void output_packet(OutputFile *of, AVPacket *pkt,
829 OutputStream *ost, int eof)
833 /* apply the output bitstream filters, if any */
834 if (ost->nb_bitstream_filters) {
837 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
844 /* get a packet from the previous filter up the chain */
845 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846 if (ret == AVERROR(EAGAIN)) {
850 } else if (ret == AVERROR_EOF) {
855 /* send it to the next filter down the chain or to the muxer */
856 if (idx < ost->nb_bitstream_filters) {
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
865 write_packet(of, pkt, ost, 0);
868 write_packet(of, pkt, ost, 0);
871 if (ret < 0 && ret != AVERROR_EOF) {
872 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* check_recording_time (fragment): if the stream has reached the file's -t
 * recording limit, close it; returns whether encoding may continue (the
 * return statements are elided from view). */
879 static int check_recording_time(OutputStream *ost)
881 OutputFile *of = output_files[ost->file_index];
883 if (of->recording_time != INT64_MAX &&
884 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
885 AV_TIME_BASE_Q) >= 0) {
886 close_output_stream(ost);
/* do_audio_out (fragment): encode one audio frame via the send/receive API
 * and push the resulting packets to the muxer. Maintains sync_opts as the
 * running sample position and updates encode statistics.
 * NOTE(review): elided excerpt — the receive loop braces and error gotos are
 * missing from view; code left byte-identical. */
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
895 AVCodecContext *enc = ost->enc_ctx;
899 av_init_packet(&pkt);
903 if (!check_recording_time(ost))
/* without a usable pts (or with negative async), stamp from our own counter */
906 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907 frame->pts = ost->sync_opts;
908 ost->sync_opts = frame->pts + frame->nb_samples;
909 ost->samples_encoded += frame->nb_samples;
910 ost->frames_encoded++;
912 av_assert0(pkt.size || !pkt.data);
913 update_benchmark(NULL);
915 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918 enc->time_base.num, enc->time_base.den);
921 ret = avcodec_send_frame(enc, frame);
926 ret = avcodec_receive_packet(enc, &pkt);
927 if (ret == AVERROR(EAGAIN))
932 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
937 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
943 output_packet(of, &pkt, ost, 0);
948 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* do_subtitle_out (fragment): encode one AVSubtitle with the legacy
 * avcodec_encode_subtitle API. DVB subtitles are encoded twice (draw +
 * clear packet); timestamps are shifted for -ss and rescaled to the mux
 * time base before output_packet().
 * NOTE(review): elided excerpt — variable declarations, nb assignment, and
 * closes are missing from view; code left byte-identical. */
952 static void do_subtitle_out(OutputFile *of,
956 int subtitle_out_max_size = 1024 * 1024;
957 int subtitle_out_size, nb, i;
962 if (sub->pts == AV_NOPTS_VALUE) {
963 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazily allocated once; freed in ffmpeg_cleanup() */
972 subtitle_out = av_malloc(subtitle_out_max_size);
974 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
979 /* Note: DVB subtitle need one packet to draw them and one other
980 packet to clear them */
981 /* XXX: signal it in the codec context ? */
982 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
987 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
989 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990 pts -= output_files[ost->file_index]->start_time;
991 for (i = 0; i < nb; i++) {
992 unsigned save_num_rects = sub->num_rects;
994 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995 if (!check_recording_time(ost))
999 // start_display_time is required to be 0
1000 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001 sub->end_display_time -= sub->start_display_time;
1002 sub->start_display_time = 0;
1006 ost->frames_encoded++;
1008 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009 subtitle_out_max_size, sub);
/* restore num_rects (the clear pass may have zeroed it — TODO confirm) */
1011 sub->num_rects = save_num_rects;
1012 if (subtitle_out_size < 0) {
1013 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1017 av_init_packet(&pkt);
1018 pkt.data = subtitle_out;
1019 pkt.size = subtitle_out_size;
1020 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023 /* XXX: the pts correction is handled here. Maybe handling
1024 it in the codec would be better */
1026 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1031 output_packet(of, &pkt, ost, 0);
/* do_video_out (fragment): encode one video frame, implementing the -vsync
 * policies — computes how many times to emit (duplicate) or drop the frame
 * from the input/output timestamp drift (delta0/delta), handles forced
 * keyframes (forced_kf list, expression, or "source"), field order, the
 * legacy RAWPICTURE path, two-pass stats, and last_frame bookkeeping for
 * duplication.
 * NOTE(review): elided excerpt — many case labels, brace closes, sync_ipts
 * computation, and error paths are missing from view; code left
 * byte-identical. */
1035 static void do_video_out(OutputFile *of,
1037 AVFrame *next_picture,
1040 int ret, format_video_sync;
1042 AVCodecContext *enc = ost->enc_ctx;
1043 AVCodecParameters *mux_par = ost->st->codecpar;
1044 AVRational frame_rate;
1045 int nb_frames, nb0_frames, i;
1046 double delta, delta0;
1047 double duration = 0;
1049 InputStream *ist = NULL;
1050 AVFilterContext *filter = ost->filter->filter;
1052 if (ost->source_index >= 0)
1053 ist = input_streams[ost->source_index];
/* derive one-frame duration (in encoder ticks) from the sink frame rate */
1055 frame_rate = av_buffersink_get_frame_rate(filter);
1056 if (frame_rate.num > 0 && frame_rate.den > 0)
1057 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1059 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1062 if (!ost->filters_script &&
1066 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1070 if (!next_picture) {
/* flush: estimate duplication count from recent history (median of 3) */
1072 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073 ost->last_nb0_frames[1],
1074 ost->last_nb0_frames[2]);
1076 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077 delta = delta0 + duration;
1079 /* by default, we output a single frame */
1080 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1083 format_video_sync = video_sync_method;
1084 if (format_video_sync == VSYNC_AUTO) {
1085 if(!strcmp(of->ctx->oformat->name, "avi")) {
1086 format_video_sync = VSYNC_VFR;
1088 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1090 && format_video_sync == VSYNC_CFR
1091 && input_files[ist->file_index]->ctx->nb_streams == 1
1092 && input_files[ist->file_index]->input_ts_offset == 0) {
1093 format_video_sync = VSYNC_VSCFR;
1095 if (format_video_sync == VSYNC_CFR && copy_ts) {
1096 format_video_sync = VSYNC_VSCFR;
1099 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1103 format_video_sync != VSYNC_PASSTHROUGH &&
1104 format_video_sync != VSYNC_DROP) {
1105 if (delta0 < -0.6) {
1106 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1108 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109 sync_ipts = ost->sync_opts;
1114 switch (format_video_sync) {
1116 if (ost->frame_number == 0 && delta0 >= 0.5) {
1117 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1120 ost->sync_opts = lrint(sync_ipts);
1123 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1126 } else if (delta < -1.1)
1128 else if (delta > 1.1) {
1129 nb_frames = lrintf(delta);
1131 nb0_frames = lrintf(delta0 - 0.6);
1137 else if (delta > 0.6)
1138 ost->sync_opts = lrint(sync_ipts);
1141 case VSYNC_PASSTHROUGH:
1142 ost->sync_opts = lrint(sync_ipts);
/* clamp to the -frames limit and keep history for future flush estimates */
1149 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150 nb0_frames = FFMIN(nb0_frames, nb_frames);
1152 memmove(ost->last_nb0_frames + 1,
1153 ost->last_nb0_frames,
1154 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155 ost->last_nb0_frames[0] = nb0_frames;
1157 if (nb0_frames == 0 && ost->last_dropped) {
1159 av_log(NULL, AV_LOG_VERBOSE,
1160 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161 ost->frame_number, ost->st->index, ost->last_frame->pts);
1163 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164 if (nb_frames > dts_error_threshold * 30) {
1165 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1169 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171 if (nb_frames_dup > dup_warning) {
1172 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1176 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1178 /* duplicates frame if needed */
1179 for (i = 0; i < nb_frames; i++) {
1180 AVFrame *in_picture;
1181 av_init_packet(&pkt);
1185 if (i < nb0_frames && ost->last_frame) {
1186 in_picture = ost->last_frame;
1188 in_picture = next_picture;
1193 in_picture->pts = ost->sync_opts;
1196 if (!check_recording_time(ost))
1198 if (ost->frame_number >= ost->max_frames)
1202 #if FF_API_LAVF_FMT_RAWPICTURE
1203 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1204 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1205 /* raw pictures are written as AVPicture structure to
1206 avoid any copies. We support temporarily the older
1208 if (in_picture->interlaced_frame)
1209 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1211 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1212 pkt.data = (uint8_t *)in_picture;
1213 pkt.size = sizeof(AVPicture);
1214 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1215 pkt.flags |= AV_PKT_FLAG_KEY;
1217 output_packet(of, &pkt, ost, 0);
1221 int forced_keyframe = 0;
1224 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1225 ost->top_field_first >= 0)
1226 in_picture->top_field_first = !!ost->top_field_first;
1228 if (in_picture->interlaced_frame) {
1229 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 in_picture->quality = enc->global_quality;
1237 in_picture->pict_type = 0;
1239 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240 in_picture->pts * av_q2d(enc->time_base) : NAN;
1241 if (ost->forced_kf_index < ost->forced_kf_count &&
1242 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243 ost->forced_kf_index++;
1244 forced_keyframe = 1;
1245 } else if (ost->forced_keyframes_pexpr) {
1247 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248 res = av_expr_eval(ost->forced_keyframes_pexpr,
1249 ost->forced_keyframes_expr_const_values, NULL);
1250 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251 ost->forced_keyframes_expr_const_values[FKF_N],
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254 ost->forced_keyframes_expr_const_values[FKF_T],
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1258 forced_keyframe = 1;
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260 ost->forced_keyframes_expr_const_values[FKF_N];
1261 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262 ost->forced_keyframes_expr_const_values[FKF_T];
1263 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1266 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267 } else if ( ost->forced_keyframes
1268 && !strncmp(ost->forced_keyframes, "source", 6)
1269 && in_picture->key_frame==1) {
1270 forced_keyframe = 1;
1273 if (forced_keyframe) {
1274 in_picture->pict_type = AV_PICTURE_TYPE_I;
1275 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1278 update_benchmark(NULL);
1280 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283 enc->time_base.num, enc->time_base.den);
1286 ost->frames_encoded++;
1288 ret = avcodec_send_frame(enc, in_picture);
1293 ret = avcodec_receive_packet(enc, &pkt);
1294 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295 if (ret == AVERROR(EAGAIN))
1301 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1307 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308 pkt.pts = ost->sync_opts;
1310 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319 frame_size = pkt.size;
1320 output_packet(of, &pkt, ost, 0);
1322 /* if two pass, output log */
1323 if (ost->logfile && enc->stats_out) {
1324 fprintf(ost->logfile, "%s", enc->stats_out);
1330 * For video, number of frames in == number of packets out.
1331 * But there may be reordering, so we can't throw away frames on encoder
1332 * flush, we need to limit them here, before they go into encoder.
1334 ost->frame_number++;
1336 if (vstats_filename && frame_size)
1337 do_video_stats(ost, frame_size);
/* keep a ref to the frame just emitted so it can be duplicated next call */
1340 if (!ost->last_frame)
1341 ost->last_frame = av_frame_alloc();
1342 av_frame_unref(ost->last_frame);
1343 if (next_picture && ost->last_frame)
1344 av_frame_ref(ost->last_frame, next_picture);
1346 av_frame_free(&ost->last_frame);
1350 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1354 static double psnr(double d)
1356 return -10.0 * log10(d);
1359 static void do_video_stats(OutputStream *ost, int frame_size)
1361 AVCodecContext *enc;
1363 double ti1, bitrate, avg_bitrate;
1365 /* this is executed just the first time do_video_stats is called */
1367 vstats_file = fopen(vstats_filename, "w");
1375 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376 frame_number = ost->st->nb_frames;
1377 if (vstats_version <= 1) {
1378 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379 ost->quality / (float)FF_QP2LAMBDA);
1381 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382 ost->quality / (float)FF_QP2LAMBDA);
1385 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 fprintf(vstats_file,"f_size= %6d ", frame_size);
1389 /* compute pts value */
1390 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1394 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 static void finish_output_stream(OutputStream *ost)
1406 OutputFile *of = output_files[ost->file_index];
1409 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1412 for (i = 0; i < of->ctx->nb_streams; i++)
1413 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * NOTE(review): this span is a gap-sampled extract of ffmpeg.c -- the leading
 * integer on each line is the original file's line number and many
 * intermediate lines (braces, declarations, continue/break statements) are
 * missing from this view.  Code text is kept byte-identical below; only
 * comments were added.
 */
1418 * Get and encode new output from any of the filtergraphs, without causing
1421 * @return 0 for success, <0 for severe errors
1423 static int reap_filters(int flush)
1425 AVFrame *filtered_frame = NULL;
1428 /* Reap all buffers present in the buffer sinks */
1429 for (i = 0; i < nb_output_streams; i++) {
1430 OutputStream *ost = output_streams[i];
1431 OutputFile *of = output_files[ost->file_index];
1432 AVFilterContext *filter;
1433 AVCodecContext *enc = ost->enc_ctx;
/* streams whose filtergraph is not configured yet are skipped here */
1436 if (!ost->filter || !ost->filter->graph->graph)
1438 filter = ost->filter->filter;
/* lazily initialize the output stream the first time a frame is reaped */
1440 if (!ost->initialized) {
1441 char error[1024] = "";
1442 ret = init_output_stream(ost, error, sizeof(error));
1444 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445 ost->file_index, ost->index, error);
1450 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451 return AVERROR(ENOMEM);
1453 filtered_frame = ost->filtered_frame;
1456 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1458 AV_BUFFERSINK_FLAG_NO_REQUEST);
1460 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1461 av_log(NULL, AV_LOG_WARNING,
1462 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463 } else if (flush && ret == AVERROR_EOF) {
/* on a flushing EOF, push a NULL frame so the video path can drain */
1464 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1465 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1469 if (ost->finished) {
1470 av_frame_unref(filtered_frame);
/* rescale the filter timebase pts to the encoder timebase; extra_bits
 * temporarily widens the denominator so float_pts keeps sub-tick
 * precision before being scaled back down */
1473 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475 AVRational filter_tb = av_buffersink_get_time_base(filter);
1476 AVRational tb = enc->time_base;
1477 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 tb.den <<= extra_bits;
1481 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483 float_pts /= 1 << extra_bits;
1484 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 filtered_frame->pts =
1488 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1491 //if (ost->source_index >= 0)
1492 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the reaped frame to the matching per-media-type encoder path */
1494 switch (av_buffersink_get_type(filter)) {
1495 case AVMEDIA_TYPE_VIDEO:
1496 if (!ost->frame_aspect_ratio.num)
1497 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1500 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1503 enc->time_base.num, enc->time_base.den);
1506 do_video_out(of, ost, filtered_frame, float_pts);
1508 case AVMEDIA_TYPE_AUDIO:
/* the audio encoder cannot follow mid-stream parameter changes unless
 * it advertises AV_CODEC_CAP_PARAM_CHANGE */
1509 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510 enc->channels != filtered_frame->channels) {
1511 av_log(NULL, AV_LOG_ERROR,
1512 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1515 do_audio_out(of, ost, filtered_frame);
1518 // TODO support subtitle filters
1522 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type byte totals with muxing overhead,
 * then (at verbose level) per-input and per-output stream packet/frame
 * statistics, and a warning when nothing at all was encoded.
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines are missing.  Code is
 * byte-identical; only comments were added.
 */
1529 static void print_final_stats(int64_t total_size)
1531 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532 uint64_t subtitle_size = 0;
1533 uint64_t data_size = 0;
1534 float percent = -1.0;
/* accumulate written bytes per media type across all output streams */
1538 for (i = 0; i < nb_output_streams; i++) {
1539 OutputStream *ost = output_streams[i];
1540 switch (ost->enc_ctx->codec_type) {
1541 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544 default: other_size += ost->data_size; break;
1546 extra_size += ost->enc_ctx->extradata_size;
1547 data_size += ost->data_size;
1548 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549 != AV_CODEC_FLAG_PASS1)
/* muxing overhead as a percentage of the payload; stays -1 ("unknown")
 * when the container size is unavailable or inconsistent */
1553 if (data_size && total_size>0 && total_size >= data_size)
1554 percent = 100.0 * (total_size - data_size) / data_size;
1556 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557 video_size / 1024.0,
1558 audio_size / 1024.0,
1559 subtitle_size / 1024.0,
1560 other_size / 1024.0,
1561 extra_size / 1024.0);
1563 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1565 av_log(NULL, AV_LOG_INFO, "unknown");
1566 av_log(NULL, AV_LOG_INFO, "\n");
1568 /* print verbose per-stream stats */
1569 for (i = 0; i < nb_input_files; i++) {
1570 InputFile *f = input_files[i];
1571 uint64_t total_packets = 0, total_size = 0;
1573 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574 i, f->ctx->filename);
1576 for (j = 0; j < f->nb_streams; j++) {
1577 InputStream *ist = input_streams[f->ist_index + j];
1578 enum AVMediaType type = ist->dec_ctx->codec_type;
1580 total_size += ist->data_size;
1581 total_packets += ist->nb_packets;
1583 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584 i, j, media_type_string(type));
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586 ist->nb_packets, ist->data_size);
1588 if (ist->decoding_needed) {
1589 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590 ist->frames_decoded);
1591 if (type == AVMEDIA_TYPE_AUDIO)
1592 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593 av_log(NULL, AV_LOG_VERBOSE, "; ");
1596 av_log(NULL, AV_LOG_VERBOSE, "\n");
1599 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600 total_packets, total_size);
/* same per-stream breakdown for the output side (muxed packets) */
1603 for (i = 0; i < nb_output_files; i++) {
1604 OutputFile *of = output_files[i];
1605 uint64_t total_packets = 0, total_size = 0;
1607 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608 i, of->ctx->filename);
1610 for (j = 0; j < of->ctx->nb_streams; j++) {
1611 OutputStream *ost = output_streams[of->ost_index + j];
1612 enum AVMediaType type = ost->enc_ctx->codec_type;
1614 total_size += ost->data_size;
1615 total_packets += ost->packets_written;
1617 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618 i, j, media_type_string(type));
1619 if (ost->encoding_needed) {
1620 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621 ost->frames_encoded);
1622 if (type == AVMEDIA_TYPE_AUDIO)
1623 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624 av_log(NULL, AV_LOG_VERBOSE, "; ");
1627 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628 ost->packets_written, ost->data_size);
1630 av_log(NULL, AV_LOG_VERBOSE, "\n");
1633 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634 total_packets, total_size);
/* nothing was encoded at all -- point the user at trimming options */
1636 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1639 av_log(NULL, AV_LOG_WARNING, "\n");
1641 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic "frame= ... fps= ... bitrate= ..." status line (and the
 * machine-readable -progress key=value block).  Rate-limited to one update
 * per 500ms unless this is the final report, in which case it also triggers
 * print_final_stats().
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines are missing.  Code is
 * byte-identical; only comments were added.
 */
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1649 AVBPrint buf_script;
1651 AVFormatContext *oc;
1653 AVCodecContext *enc;
1654 int frame_number, vid, i;
1657 int64_t pts = INT64_MIN + 1;
1658 static int64_t last_time = -1;
1659 static int qp_histogram[52];
1660 int hours, mins, secs, us;
1664 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: print at most every 500ms between reports */
1667 if (!is_last_report) {
1668 if (last_time == -1) {
1669 last_time = cur_time;
1672 if ((cur_time - last_time) < 500000)
1674 last_time = cur_time;
1677 t = (cur_time-timer_start) / 1000000.0;
1680 oc = output_files[0]->ctx;
1682 total_size = avio_size(oc->pb);
1683 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684 total_size = avio_tell(oc->pb);
/* buf_script collects the -progress key=value lines in parallel with
 * the human-readable buf */
1688 av_bprint_init(&buf_script, 0, 1);
1689 for (i = 0; i < nb_output_streams; i++) {
1691 ost = output_streams[i];
1693 if (!ost->stream_copy)
1694 q = ost->quality / (float) FF_QP2LAMBDA;
1696 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1698 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699 ost->file_index, ost->index, q);
/* only the first video stream contributes frame/fps to the report */
1701 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1704 frame_number = ost->frame_number;
1705 fps = t > 1 ? frame_number / t : 0;
1706 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1707 frame_number, fps < 9.95, fps, q);
1708 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1710 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711 ost->file_index, ost->index, q);
1713 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2 histogram of quantizer values seen so far */
1717 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1719 for (j = 0; j < 32; j++)
1720 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1723 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1725 double error, error_sum = 0;
1726 double scale, scale_sum = 0;
1728 char type[3] = { 'Y','U','V' };
1729 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1730 for (j = 0; j < 3; j++) {
/* final report: cumulative per-plane error; periodic: last frame only */
1731 if (is_last_report) {
1732 error = enc->error[j];
1733 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1735 error = ost->error[j];
1736 scale = enc->width * enc->height * 255.0 * 255.0;
1742 p = psnr(error / scale);
1743 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1744 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745 ost->file_index, ost->index, type[j] | 32, p);
1747 p = psnr(error_sum / scale_sum);
1748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1749 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750 ost->file_index, ost->index, p);
1754 /* compute min output value */
1755 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1756 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757 ost->st->time_base, AV_TIME_BASE_Q));
1759 nb_frames_drop += ost->last_dropped;
/* split the max end timestamp into H:M:S.us for display */
1762 secs = FFABS(pts) / AV_TIME_BASE;
1763 us = FFABS(pts) % AV_TIME_BASE;
1769 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1772 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 "size=%8.0fkB time=", total_size / 1024.0);
1777 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1779 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1780 (100 * us) / AV_TIME_BASE);
1783 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1784 av_bprintf(&buf_script, "bitrate=N/A\n");
1786 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1787 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1790 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1793 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1794 hours, mins, secs, us);
1796 if (nb_frames_dup || nb_frames_drop)
1797 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1798 nb_frames_dup, nb_frames_drop);
1799 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1800 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1803 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1804 av_bprintf(&buf_script, "speed=N/A\n");
1806 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1807 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps the periodic report on one terminal line; final gets '\n' */
1810 if (print_stats || is_last_report) {
1811 const char end = is_last_report ? '\n' : '\r';
1812 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1813 fprintf(stderr, "%s %c", buf, end);
1815 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1820 if (progress_avio) {
1821 av_bprintf(&buf_script, "progress=%s\n",
1822 is_last_report ? "end" : "continue");
1823 avio_write(progress_avio, buf_script.str,
1824 FFMIN(buf_script.len, buf_script.size - 1));
1825 avio_flush(progress_avio);
1826 av_bprint_finalize(&buf_script, NULL);
1827 if (is_last_report) {
1828 if ((ret = avio_closep(&progress_avio)) < 0)
1829 av_log(NULL, AV_LOG_ERROR,
1830 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1835 print_final_stats(total_size);
/*
 * Drain all encoders at end of input: for each stream that still needs
 * encoding, make sure it is initialized (configuring a fallback filtergraph
 * with fake input parameters if no data ever arrived), then send the NULL
 * flush frame and mux every remaining packet until AVERROR_EOF.
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines are missing.  Code is
 * byte-identical; only comments were added.
 */
1838 static void flush_encoders(void)
1842 for (i = 0; i < nb_output_streams; i++) {
1843 OutputStream *ost = output_streams[i];
1844 AVCodecContext *enc = ost->enc_ctx;
1845 OutputFile *of = output_files[ost->file_index];
1847 if (!ost->encoding_needed)
1850 // Try to enable encoding with no input frames.
1851 // Maybe we should just let encoding fail instead.
1852 if (!ost->initialized) {
1853 FilterGraph *fg = ost->filter->graph;
1854 char error[1024] = "";
1856 av_log(NULL, AV_LOG_WARNING,
1857 "Finishing stream %d:%d without any data written to it.\n",
1858 ost->file_index, ost->st->index);
1860 if (ost->filter && !fg->graph) {
/* the graph was never configured: backfill each unfed input with the
 * demuxer-provided codec parameters so configuration can succeed */
1862 for (x = 0; x < fg->nb_inputs; x++) {
1863 InputFilter *ifilter = fg->inputs[x];
1864 if (ifilter->format < 0) {
1865 AVCodecParameters *par = ifilter->ist->st->codecpar;
1866 // We never got any input. Set a fake format, which will
1867 // come from libavformat.
1868 ifilter->format = par->format;
1869 ifilter->sample_rate = par->sample_rate;
1870 ifilter->channels = par->channels;
1871 ifilter->channel_layout = par->channel_layout;
1872 ifilter->width = par->width;
1873 ifilter->height = par->height;
1874 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1878 if (!ifilter_has_all_input_formats(fg))
1881 ret = configure_filtergraph(fg);
1883 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1887 finish_output_stream(ost);
1890 ret = init_output_stream(ost, error, sizeof(error));
1892 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893 ost->file_index, ost->index, error);
/* raw (frame_size <= 1) audio needs no flush */
1898 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1900 #if FF_API_LAVF_FMT_RAWPICTURE
1901 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1905 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1909 const char *desc = NULL;
1913 switch (enc->codec_type) {
1914 case AVMEDIA_TYPE_AUDIO:
1917 case AVMEDIA_TYPE_VIDEO:
1924 av_init_packet(&pkt);
1928 update_benchmark(NULL);
/* send_frame(NULL) enters drain mode; loop until packets stop coming */
1930 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1931 ret = avcodec_send_frame(enc, NULL);
1933 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1940 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1941 if (ret < 0 && ret != AVERROR_EOF) {
1942 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1947 if (ost->logfile && enc->stats_out) {
1948 fprintf(ost->logfile, "%s", enc->stats_out);
1950 if (ret == AVERROR_EOF) {
/* eof=1 tells the muxer this stream is done */
1951 output_packet(of, &pkt, ost, 1);
1954 if (ost->finished & MUXER_FINISHED) {
1955 av_packet_unref(&pkt);
1958 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959 pkt_size = pkt.size;
1960 output_packet(of, &pkt, ost, 0);
1961 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1962 do_video_stats(ost, pkt_size);
1969 * Check whether a packet from ist should be written into ost at this time
1971 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1973 OutputFile *of = output_files[ost->file_index];
1974 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1976 if (ost->source_index != ist_index)
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Stream-copy path: forward a demuxed packet to the muxer without
 * re-encoding.  Handles -copyts/-ss trimming, recording-time limits,
 * timestamp rescaling into the mux timebase, audio DTS smoothing, and the
 * legacy parser/AVPicture compatibility paths.
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines (braces, returns)
 * are missing.  Code is byte-identical; only comments were added.
 */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 OutputFile *of = output_files[ost->file_index];
1991 InputFile *f = input_files [ist->file_index];
1992 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997 av_init_packet(&opkt);
/* unless asked otherwise, drop non-key packets before the first keyframe */
1999 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000 !ost->copy_initial_nonkeyframes)
2003 if (!ost->frame_number && !ost->copy_prior_start) {
2004 int64_t comp_start = start_time;
2005 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007 if (pkt->pts == AV_NOPTS_VALUE ?
2008 ist->pts < comp_start :
2009 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file -t/-to limit reached: finish this stream */
2013 if (of->recording_time != INT64_MAX &&
2014 ist->pts >= of->recording_time + start_time) {
2015 close_output_stream(ost);
2019 if (f->recording_time != INT64_MAX) {
2020 start_time = f->ctx->start_time;
2021 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022 start_time += f->start_time;
2023 if (ist->pts >= f->recording_time + start_time) {
2024 close_output_stream(ost);
2029 /* force the input stream PTS */
2030 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale input timestamps into the muxer timebase, offset by -ss */
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 opkt.pts = AV_NOPTS_VALUE;
2038 if (pkt->dts == AV_NOPTS_VALUE)
2039 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042 opkt.dts -= ost_tb_start_time;
/* audio: derive smoothed pts/dts from sample counts to avoid drift when
 * the input timebase cannot represent exact sample positions */
2044 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2047 duration = ist->dec_ctx->frame_size;
2048 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2049 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2050 ost->mux_timebase) - ost_tb_start_time;
2053 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 opkt.flags = pkt->flags;
2056 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063 &opkt.data, &opkt.size,
2064 pkt->data, pkt->size,
2065 pkt->flags & AV_PKT_FLAG_KEY);
2067 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* parser allocated a new buffer: wrap it so the packet owns it */
2072 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086 /* store AVPicture in AVPacket, as expected by the output format */
2087 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2093 opkt.data = (uint8_t *)&pict;
2094 opkt.size = sizeof(AVPicture);
2095 opkt.flags |= AV_PKT_FLAG_KEY;
2099 output_packet(of, &opkt, ost, 0);
2102 int guess_input_channel_layout(InputStream *ist)
2104 AVCodecContext *dec = ist->dec_ctx;
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
2109 if (dec->channels > ist->guess_layout_max)
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2127 if (ret < 0 && exit_on_error)
2130 if (exit_on_error && *got_output && ist) {
2131 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input.  Detects mid-stream
 * parameter changes (format, hw frames context, audio rate/channels/layout,
 * video dimensions); when a change is seen, or the graph is not configured
 * yet, the graph is drained and reconfigured -- and if sibling inputs still
 * lack formats, the frame is cloned into the input's FIFO for later.
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines (braces, breaks,
 * returns) are missing.  Code is byte-identical; only comments were added.
 */
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
/* hw frames contexts must match by identity, not just presence */
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164 ifilter->channels != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width != frame->width ||
2169 ifilter->height != frame->height;
/* record this frame's parameters as the input's current ones */
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
2182 if (!ifilter_has_all_input_formats(fg)) {
/* cannot configure yet: queue a clone of the frame in this input's
 * FIFO, growing the FIFO when full */
2183 AVFrame *tmp = av_frame_clone(frame);
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2191 av_frame_free(&tmp);
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* drain the existing graph before reconfiguring it */
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2209 ret = configure_filtergraph(fg);
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* normal path: hand the frame to the buffersrc (ownership moves) */
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218 if (ret != AVERROR_EOF)
2219 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2226 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2232 if (ifilter->filter) {
2233 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2237 // the filtergraph was never configured
2238 FilterGraph *fg = ifilter->graph;
2239 for (i = 0; i < fg->nb_inputs; i++)
2240 if (!fg->inputs[i]->eof)
2242 if (i == fg->nb_inputs) {
2243 // All the input streams have finished without the filtergraph
2244 // ever being configured.
2245 // Mark the output streams as finished.
2246 for (j = 0; j < fg->nb_outputs; j++)
2247 finish_output_stream(fg->outputs[j]->ost);
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet: run the decoder, validate the sample rate,
 * account statistics, advance the stream's predicted next pts/dts by the
 * decoded duration, pick a timebase for the frame's pts (stream tb, packet
 * pts, or the stream dts in AV_TIME_BASE_Q as fallback), smooth it with
 * av_rescale_delta(), and forward the frame to the filtergraphs.
 *
 * NOTE(review): gap-sampled extract -- leading integers are original
 * ffmpeg.c line numbers and some intermediate lines (braces, goto/else
 * lines) are missing.  Code is byte-identical; only comments were added.
 */
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
/* lazily allocate the per-stream scratch frames */
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2342 /* increment next_dts to use for the case where the input stream does not
2343 have timestamps or there are multiple frames in the packet */
2344 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2346 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* choose the timebase the frame's pts is expressed in, preferring real
 * timestamps over the ist->dts fallback */
2350 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351 decoded_frame_tb = ist->st->time_base;
2352 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353 decoded_frame->pts = pkt->pts;
2354 decoded_frame_tb = ist->st->time_base;
2356 decoded_frame->pts = ist->dts;
2357 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale to a 1/sample_rate timebase, accumulating rounding error in
 * filter_in_rescale_delta_last to avoid drift */
2359 if (decoded_frame->pts != AV_NOPTS_VALUE)
2360 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362 (AVRational){1, avctx->sample_rate});
2363 ist->nb_samples = decoded_frame->nb_samples;
2364 err = send_frame_to_filters(ist, decoded_frame);
2366 av_frame_unref(ist->filter_frame);
2367 av_frame_unref(decoded_frame);
2368 return err < 0 ? err : ret;
/*
 * Decode one video packet (or flush the decoder when pkt is NULL) and send
 * any resulting frame to the input stream's attached filtergraphs.
 * On output, *got_output is set when a frame was produced and *duration_pts
 * receives the decoded frame's pkt_duration (in stream time base).
 * Returns the decoder's return code, or a negative error from frame
 * post-processing / filtering.
 * NOTE(review): several interior lines are elided in this excerpt; the
 * comments below describe only what the visible code establishes.
 */
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2374     AVFrame *decoded_frame;
2375     int i, ret = 0, err = 0;
2376     int64_t best_effort_timestamp;
2377     int64_t dts = AV_NOPTS_VALUE;
2380     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2383     if (!eof && pkt && pkt->size == 0)
/* Lazily allocate the per-stream scratch frames; both are reused across calls. */
2386     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387         return AVERROR(ENOMEM);
2388     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389         return AVERROR(ENOMEM);
2390     decoded_frame = ist->decoded_frame;
/* Convert the stream-level dts (kept in AV_TIME_BASE) back to stream time base. */
2391     if (ist->dts != AV_NOPTS_VALUE)
2392         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2395     avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2398     // The old code used to set dts on the drain packet, which does not work
2399     // with the new API anymore.
/* Remember the dts so it can serve as a fallback timestamp while draining. */
2401     void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2403         return AVERROR(ENOMEM);
2404     ist->dts_buffer = new;
2405     ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2408     update_benchmark(NULL);
2409     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2414     // The following line may be required in some cases where there is no parser
2415     // or the parser does not has_b_frames correctly
2416     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2420             av_log(ist->dec_ctx, AV_LOG_WARNING,
2421                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2422                    "If you want to help, upload a sample "
2423                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425                    ist->dec_ctx->has_b_frames,
2426                    ist->st->codecpar->video_delay);
/* AVERROR_EOF is the normal end of draining, not a decode failure. */
2429     if (ret != AVERROR_EOF)
2430         check_decode_result(ist, got_output, ret);
2432     if (*got_output && ret >= 0) {
2433         if (ist->dec_ctx->width  != decoded_frame->width ||
2434             ist->dec_ctx->height != decoded_frame->height ||
2435             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437                 decoded_frame->width,
2438                 decoded_frame->height,
2439                 decoded_frame->format,
2440                 ist->dec_ctx->width,
2441                 ist->dec_ctx->height,
2442                 ist->dec_ctx->pix_fmt);
2446     if (!*got_output || ret < 0)
/* Honor a user-forced field order (-top option), if any. */
2449     if(ist->top_field_first>=0)
2450         decoded_frame->top_field_first = ist->top_field_first;
2452     ist->frames_decoded++;
/* Download the frame from the hwaccel surface to system memory when needed. */
2454     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2459     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2461     best_effort_timestamp= decoded_frame->best_effort_timestamp;
2462     *duration_pts = decoded_frame->pkt_duration;
/* With a forced input framerate (-r before -i), synthesize CFR timestamps. */
2464     if (ist->framerate.num)
2465         best_effort_timestamp = ist->cfr_next_pts++;
/* While draining, frames may come without timestamps: fall back to the
 * buffered packet dts values, consumed FIFO-style. */
2467     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2468         best_effort_timestamp = ist->dts_buffer[0];
2470         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2471             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2472         ist->nb_dts_buffer--;
2475     if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* Note: assigns decoded_frame->pts as a side effect of the rescale argument. */
2476         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2478         if (ts != AV_NOPTS_VALUE)
2479             ist->next_pts = ist->pts = ts;
2483         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2484                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2485                ist->st->index, av_ts2str(decoded_frame->pts),
2486                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2487                best_effort_timestamp,
2488                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2489                decoded_frame->key_frame, decoded_frame->pict_type,
2490                ist->st->time_base.num, ist->st->time_base.den);
/* Prefer the container-level SAR over the codec-level one when present. */
2493     if (ist->st->sample_aspect_ratio.num)
2494         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2496     err = send_frame_to_filters(ist, decoded_frame);
2499     av_frame_unref(ist->filter_frame);
2500     av_frame_unref(decoded_frame);
/* A filtering error (err) takes precedence over the decoder's return code. */
2501     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and dispatch the result: render it onto the
 * sub2video frame (when subtitles feed a filtergraph), queue it until the
 * filtergraph is ready, and/or re-encode it into every subtitle output
 * stream fed by this input.
 * NOTE(review): interior lines are elided in this excerpt; with -fix_sub_duration
 * the function works one subtitle behind, emitting the PREVIOUS event after
 * clamping its duration against the current one's pts.
 */
2504 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2507     AVSubtitle subtitle;
2509     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2510                                           &subtitle, got_output, pkt);
2512     check_decode_result(NULL, got_output, ret);
2514     if (ret < 0 || !*got_output) {
2517         sub2video_flush(ist);
2521     if (ist->fix_sub_duration) {
2523         if (ist->prev_sub.got_output) {
/* Duration is clamped in milliseconds (hence the rescale by 1000/AV_TIME_BASE). */
2524             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2525                              1000, AV_TIME_BASE);
2526             if (end < ist->prev_sub.subtitle.end_display_time) {
2527                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2528                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2529                        ist->prev_sub.subtitle.end_display_time, end,
2530                        end <= 0 ? ", dropping it" : "");
2531                 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: the previous (now duration-fixed) event is the
 * one that continues through the rest of this function. */
2534         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2535         FFSWAP(int,        ret,         ist->prev_sub.ret);
2536         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2544     if (ist->sub2video.frame) {
2545         sub2video_update(ist, &subtitle);
2546     } else if (ist->nb_filters) {
/* Filtergraph not configured yet: buffer subtitles in a FIFO, growing it
 * (doubling) when full. */
2547         if (!ist->sub2video.sub_queue)
2548             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2549         if (!ist->sub2video.sub_queue)
2551         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2552             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2556         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2560     if (!subtitle.num_rects)
2563     ist->frames_decoded++;
/* Re-encode into every subtitle output stream that takes this input. */
2565     for (i = 0; i < nb_output_streams; i++) {
2566         OutputStream *ost = output_streams[i];
2568         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2569             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2572         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2577     avsubtitle_free(&subtitle);
/*
 * Propagate EOF to every filtergraph input fed by this input stream,
 * converting the stream's last pts from AV_TIME_BASE back to the stream
 * time base first.
 * NOTE(review): the tail of this function (error handling / return) is
 * elided in this excerpt.
 */
2581 static int send_filter_eof(InputStream *ist)
2584     /* TODO keep pts also in stream time base to avoid converting back */
2585     int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2586                                    AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2588     for (i = 0; i < ist->nb_filters; i++) {
2589         ret = ifilter_send_eof(ist->filters[i], pts);
/*
 * Central per-packet dispatcher: feed one input packet (or NULL at EOF) to
 * the stream's decoder(s) or stream-copy path, maintaining the stream's
 * dts/pts bookkeeping in AV_TIME_BASE units throughout.
 * no_eof suppresses the filtergraph EOF when flushing for stream looping.
 * Returns non-zero while the stream still has data (i.e. !eof_reached).
 * NOTE(review): interior lines are elided in this excerpt.
 */
2596 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2597 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2601     int eof_reached = 0;
/* First packet: seed ist->dts; start negative by the decoder delay so that
 * output timestamps begin near zero once B-frame reordering is accounted for. */
2604     if (!ist->saw_first_ts) {
2605         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2607         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2608             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2609             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2611         ist->saw_first_ts = 1;
2614     if (ist->next_dts == AV_NOPTS_VALUE)
2615         ist->next_dts = ist->dts;
2616     if (ist->next_pts == AV_NOPTS_VALUE)
2617         ist->next_pts = ist->pts;
2621     av_init_packet(&avpkt);
/* Resync the predicted timestamps from the packet's own dts when available. */
2628     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2629         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2630         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2631             ist->next_pts = ist->pts = ist->dts;
2634     // while we have more to decode or while the decoder did output something on EOF
2635     while (ist->decoding_needed) {
2636         int64_t duration_dts = 0;
2637         int64_t duration_pts = 0;
2639         int decode_failed = 0;
2641         ist->pts = ist->next_pts;
2642         ist->dts = ist->next_dts;
2644         switch (ist->dec_ctx->codec_type) {
2645         case AVMEDIA_TYPE_AUDIO:
/* 'repeating' iterations re-enter the decoder with NULL to drain buffered frames. */
2646             ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2649         case AVMEDIA_TYPE_VIDEO:
2650             ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2652             if (!repeating || !pkt || got_output) {
2653                 if (pkt && pkt->duration) {
2654                     duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2655                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* Estimate the frame duration from the codec framerate, honoring repeated
 * fields reported by the parser (repeat_pict). */
2656                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2657                     duration_dts = ((int64_t)AV_TIME_BASE *
2658                                     ist->dec_ctx->framerate.den * ticks) /
2659                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2662                 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2663                     ist->next_dts += duration_dts;
2665                     ist->next_dts = AV_NOPTS_VALUE;
2669                 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2671         case AVMEDIA_TYPE_SUBTITLE:
2674             ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2675             if (!pkt && ret >= 0)
2682         if (ret == AVERROR_EOF) {
2688             if (decode_failed) {
2689                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2690                        ist->file_index, ist->st->index, av_err2str(ret));
2692                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2693                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2695             if (!decode_failed || exit_on_error)
2701         ist->got_output = 1;
2706         // During draining, we might get multiple output frames in this loop.
2707         // ffmpeg.c does not drain the filter chain on configuration changes,
2708         // which means if we send multiple frames at once to the filters, and
2709         // one of those frames changes configuration, the buffered frames will
2710         // be lost. This can upset certain FATE tests.
2711         // Decode only 1 frame per call on EOF to appease these FATE tests.
2712         // The ideal solution would be to rewrite decoding to use the new
2713         // decoding API in a better way.
2720     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2721     /* except when looping we need to flush but not to send an EOF */
2722     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2723         int ret = send_filter_eof(ist);
2725             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2730     /* handle stream copy */
2731     if (!ist->decoding_needed) {
2732         ist->dts = ist->next_dts;
2733         switch (ist->dec_ctx->codec_type) {
2734         case AVMEDIA_TYPE_AUDIO:
2735             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2736                              ist->dec_ctx->sample_rate;
2738         case AVMEDIA_TYPE_VIDEO:
2739             if (ist->framerate.num) {
2740                 // TODO: Remove work-around for c99-to-c89 issue 7
2741                 AVRational time_base_q = AV_TIME_BASE_Q;
2742                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2743                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
/* NOTE(review): pkt is dereferenced here; confirm this branch cannot be
 * reached with pkt == NULL (EOF flush) for copied video streams. */
2744             } else if (pkt->duration) {
2745                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2746             } else if(ist->dec_ctx->framerate.num != 0) {
2747                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2748                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2749                                   ist->dec_ctx->framerate.den * ticks) /
2750                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2754         ist->pts = ist->dts;
2755         ist->next_pts = ist->next_dts;
/* Forward the packet untouched to every stream-copy output it feeds. */
2757     for (i = 0; pkt && i < nb_output_streams; i++) {
2758         OutputStream *ost = output_streams[i];
2760         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2763         do_streamcopy(ist, ost, pkt);
2766     return !eof_reached;
/*
 * Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to the file given by -sdp_file.
 * Waits until every output file's header has been written so codec
 * parameters are final. NOTE(review): error-path lines are elided here.
 */
2769 static void print_sdp(void)
2774     AVIOContext *sdp_pb;
2775     AVFormatContext **avc;
/* Bail out (fall through) until all output headers have been written. */
2777     for (i = 0; i < nb_output_files; i++) {
2778         if (!output_files[i]->header_written)
2782     avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP muxer contexts; j counts how many were found. */
2785     for (i = 0, j = 0; i < nb_output_files; i++) {
2786         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2787             avc[j] = output_files[i]->ctx;
2795     av_sdp_create(avc, j, sdp, sizeof(sdp));
2797     if (!sdp_filename) {
2798         printf("SDP:\n%s\n", sdp);
2801         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2802             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2804             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2805             avio_closep(&sdp_pb);
2806             av_freep(&sdp_filename);
2814 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2817 for (i = 0; hwaccels[i].name; i++)
2818 if (hwaccels[i].pix_fmt == pix_fmt)
2819 return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hardware format whose hwaccel can be
 * initialized and matches the user's -hwaccel selection; records the
 * chosen hwaccel on the InputStream. Non-hwaccel formats are skipped here.
 * NOTE(review): the fallthrough for software formats and the final return
 * are elided in this excerpt.
 */
2823 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2825     InputStream *ist = s->opaque;
2826     const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE), in decreasing preference. */
2829     for (p = pix_fmts; *p != -1; p++) {
2830         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2831         const HWAccel *hwaccel;
2833         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2836         hwaccel = get_hwaccel(*p);
/* Skip hwaccels that conflict with an already-active one or with the
 * user's explicit -hwaccel choice. */
2838             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2839             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2842         ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2844             if (ist->hwaccel_id == hwaccel->id) {
2845                 av_log(NULL, AV_LOG_FATAL,
2846                        "%s hwaccel requested for input stream #%d:%d, "
2847                        "but cannot be initialized.\n", hwaccel->name,
2848                        ist->file_index, ist->st->index);
2849                 return AV_PIX_FMT_NONE;
2854         if (ist->hw_frames_ctx) {
2855             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2856             if (!s->hw_frames_ctx)
2857                 return AV_PIX_FMT_NONE;
2860         ist->active_hwaccel_id = hwaccel->id;
2861         ist->hwaccel_pix_fmt   = *p;
2868 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2870 InputStream *ist = s->opaque;
2872 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2873 return ist->hwaccel_get_buffer(s, frame, flags);
2875 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Prepare input stream ist_index for transcoding: when decoding is needed,
 * configure the decoder context (callbacks, options, packet time base,
 * hw device) and open it; on failure, write a human-readable message into
 * error[error_len] and return a negative AVERROR code.
 * NOTE(review): interior lines are elided in this excerpt.
 */
2878 static int init_input_stream(int ist_index, char *error, int error_len)
2881     InputStream *ist = input_streams[ist_index];
2883     if (ist->decoding_needed) {
2884         AVCodec *codec = ist->dec;
2886             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2887                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2888             return AVERROR(EINVAL);
/* Hook the per-stream callbacks: opaque carries the InputStream so
 * get_format/get_buffer can reach it. */
2891         ist->dec_ctx->opaque                = ist;
2892         ist->dec_ctx->get_format            = get_format;
2893         ist->dec_ctx->get_buffer2           = get_buffer;
2894         ist->dec_ctx->thread_safe_callbacks = 1;
2896         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2897         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2898            (ist->decoding_needed & DECODING_FOR_OST)) {
2899             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2900             if (ist->decoding_needed & DECODING_FOR_FILTER)
2901                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2904         av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2906         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2907          * audio, and video decoders such as cuvid or mediacodec */
2908         av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
/* Default to automatic thread-count selection unless the user set one. */
2910         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2911             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2913         ret = hw_device_setup_for_decode(ist);
2915             snprintf(error, error_len, "Device setup failed for "
2916                      "decoder on input stream #%d:%d : %s",
2917                      ist->file_index, ist->st->index, av_err2str(ret));
2921         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2922             if (ret == AVERROR_EXPERIMENTAL)
2923                 abort_codec_experimental(codec, 0);
2925             snprintf(error, error_len,
2926                      "Error while opening decoder for input stream "
2928                     ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left in the dict was not consumed -> abort. */
2931         assert_avoptions(ist->decoder_opts);
2934     ist->next_pts = AV_NOPTS_VALUE;
2935     ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream feeding this output stream, based on its source
 * index. NOTE(review): the no-source fallthrough (presumably returning
 * NULL) is elided in this excerpt — callers do check for NULL.
 */
2940 static InputStream *get_input_stream(OutputStream *ost)
2942     if (ost->source_index >= 0)
2943         return input_streams[ost->source_index];
/* qsort()-compatible three-way comparison of two int64_t values. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* Same result as FFDIFFSIGN(lhs, rhs): -1/0/+1 without the signed
     * overflow that a naive lhs - rhs could trigger. */
    return (lhs > rhs) - (lhs < rhs);
}
/*
 * Once every stream of output file file_index is initialized, write the
 * muxer header, optionally emit the SDP, and flush any packets that were
 * queued while the muxer was not yet open.
 * NOTE(review): early-return and error-path lines are elided here.
 */
2952 /* open the muxer when all the streams are initialized */
2953 static int check_init_output_file(OutputFile *of, int file_index)
/* Fall through (do nothing) while any stream is still uninitialized. */
2957     for (i = 0; i < of->ctx->nb_streams; i++) {
2958         OutputStream *ost = output_streams[of->ost_index + i];
2959         if (!ost->initialized)
2963     of->ctx->interrupt_callback = int_cb;
2965     ret = avformat_write_header(of->ctx, &of->opts);
2967         av_log(NULL, AV_LOG_ERROR,
2968                "Could not write header for output file #%d "
2969                "(incorrect codec parameters ?): %s\n",
2970                file_index, av_err2str(ret));
2973     //assert_avoptions(of->opts);
2974     of->header_written = 1;
2976     av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2978     if (sdp_filename || want_sdp)
2981     /* flush the muxing queues */
2982     for (i = 0; i < of->ctx->nb_streams; i++) {
2983         OutputStream *ost = output_streams[of->ost_index + i];
2985         /* try to improve muxing time_base (only possible if nothing has been written yet) */
2986         if (!av_fifo_size(ost->muxing_queue))
2987             ost->mux_timebase = ost->st->time_base;
/* Drain packets queued before the header was written. */
2989         while (av_fifo_size(ost->muxing_queue)) {
2991             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2992             write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to an output stream,
 * wiring each filter's input parameters/time base to the previous filter's
 * output (or the stream itself for the first one), then propagate the last
 * filter's output parameters back onto the stream.
 * NOTE(review): error-return lines are elided in this excerpt.
 */
2999 static int init_output_bsfs(OutputStream *ost)
3004     if (!ost->nb_bitstream_filters)
3007     for (i = 0; i < ost->nb_bitstream_filters; i++) {
3008         ctx = ost->bsf_ctx[i];
/* Filter i consumes the output of filter i-1; the first consumes the
 * stream's codec parameters. */
3010         ret = avcodec_parameters_copy(ctx->par_in,
3011                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3015         ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3017         ret = av_bsf_init(ctx);
3019             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3020                    ost->bsf_ctx[i]->filter->name);
/* The chain's final output defines what the muxer will actually see. */
3025     ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3026     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3030     ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy (-c copy) mode: transfer codec
 * parameters, codec tag, framerate, time base, duration, disposition and
 * side data from the source input stream, with per-codec-type fixups.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3035 static int init_output_stream_streamcopy(OutputStream *ost)
3037     OutputFile *of = output_files[ost->file_index];
3038     InputStream *ist = get_input_stream(ost);
3039     AVCodecParameters *par_dst = ost->st->codecpar;
3040     AVCodecParameters *par_src = ost->ref_par;
3043     uint32_t codec_tag = par_dst->codec_tag;
/* Stream copy always has a source stream and never a filtergraph. */
3045     av_assert0(ist && !ost->filter);
3047     ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3049     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3051         av_log(NULL, AV_LOG_FATAL,
3052                "Error setting up codec context options.\n");
3055     avcodec_parameters_from_context(par_src, ost->enc_ctx);
3058         unsigned int codec_tag_tmp;
/* Keep the source codec tag only when the output container can represent
 * it (or has no tag table at all). */
3059         if (!of->ctx->oformat->codec_tag ||
3060             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3061             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3062             codec_tag = par_src->codec_tag;
3065     ret = avcodec_parameters_copy(par_dst, par_src);
3069     par_dst->codec_tag = codec_tag;
3071     if (!ost->frame_rate.num)
3072         ost->frame_rate = ist->framerate;
3073     ost->st->avg_frame_rate = ost->frame_rate;
3075     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3079     // copy timebase while removing common factors
/* av_add_q with 0/1 is used purely for its fraction reduction side effect. */
3080     if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3081         ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3083     // copy estimated duration as a hint to the muxer
3084     if (ost->st->duration <= 0 && ist->st->duration > 0)
3085         ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3088     ost->st->disposition = ist->st->disposition;
3090     if (ist->st->nb_side_data) {
3091         for (i = 0; i < ist->st->nb_side_data; i++) {
3092             const AVPacketSideData *sd_src = &ist->st->side_data[i];
3095             dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3097                 return AVERROR(ENOMEM);
3098             memcpy(dst_data, sd_src->data, sd_src->size);
/* A user-forced rotation overrides any copied display matrix. */
3102     if (ost->rotate_overridden) {
3103         uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3104                                               sizeof(int32_t) * 9);
3106             av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3109     ost->parser = av_parser_init(par_dst->codec_id);
3110     ost->parser_avctx = avcodec_alloc_context3(NULL);
3111     if (!ost->parser_avctx)
3112         return AVERROR(ENOMEM);
3114     switch (par_dst->codec_type) {
3115     case AVMEDIA_TYPE_AUDIO:
3116         if (audio_volume != 256) {
3117             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
/* Clear bogus block_align values known to appear for MP3/AC3 streams. */
3120         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3121             par_dst->block_align= 0;
3122         if(par_dst->codec_id == AV_CODEC_ID_AC3)
3123             par_dst->block_align= 0;
3125     case AVMEDIA_TYPE_VIDEO:
3126         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3128                 av_mul_q(ost->frame_aspect_ratio,
3129                          (AVRational){ par_dst->height, par_dst->width });
3130             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3131                    "with stream copy may produce invalid files\n");
3133         else if (ist->st->sample_aspect_ratio.num)
3134             sar = ist->st->sample_aspect_ratio;
3136             sar = par_src->sample_aspect_ratio;
3137         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3138         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3139         ost->st->r_frame_rate = ist->st->r_frame_rate;
3143     ost->mux_timebase = ist->st->time_base;
/*
 * Set the output stream's "encoder" metadata tag to identify libavcodec and
 * the encoder used, unless the user already provided one. In bitexact mode
 * (fflags/flags +bitexact) the version string is replaced by a plain
 * "Lavc" so output files stay reproducible.
 * NOTE(review): early-return lines are elided in this excerpt.
 */
3148 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3150     AVDictionaryEntry *e;
3152     uint8_t *encoder_string;
3153     int encoder_string_len;
3154     int format_flags = 0;
3155     int codec_flags = 0;
/* Respect a user-supplied encoder tag. */
3157     if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Evaluate the muxer-level and codec-level flag strings to detect +bitexact. */
3160     e = av_dict_get(of->opts, "fflags", NULL, 0);
3162         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3165         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3167     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3169         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3172         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3175     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3176     encoder_string     = av_mallocz(encoder_string_len);
3177     if (!encoder_string)
3180     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3181         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3183         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3184     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary. */
3185     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3186                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification (comma-separated timestamps,
 * with "chapters[+offset]" expanding to all chapter start times) into a
 * sorted array of pts values in the encoder time base, stored on ost.
 * NOTE(review): interior lines (e.g. the comma-counting body and loop
 * advance) are elided in this excerpt.
 */
3189 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3190                                     AVCodecContext *avctx)
3193     int n = 1, i, size, index = 0;
/* First pass over kf counts entries (n starts at 1: entries = commas + 1). */
3196     for (p = kf; *p; p++)
3200     pts = av_malloc_array(size, sizeof(*pts));
3202         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3207     for (i = 0; i < n; i++) {
3208         char *next = strchr(p, ',');
3213         if (!memcmp(p, "chapters", 8)) {
3215             AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by the chapter count, with overflow check on size. */
3218             if (avf->nb_chapters > INT_MAX - size ||
3219                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3221                 av_log(NULL, AV_LOG_FATAL,
3222                        "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters" shifts every chapter start. */
3225             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3226             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3228             for (j = 0; j < avf->nb_chapters; j++) {
3229                 AVChapter *c = avf->chapters[j];
3230                 av_assert1(index < size);
3231                 pts[index++] = av_rescale_q(c->start, c->time_base,
3232                                             avctx->time_base) + t;
3237             t = parse_time_or_die("force_key_frames", p, 1);
3238             av_assert1(index < size);
3239             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3246     av_assert0(index == size);
/* Keyframe forcing expects timestamps in ascending order. */
3247     qsort(pts, size, sizeof(*pts), compare_int64);
3248     ost->forced_kf_count = size;
3249     ost->forced_kf_pts   = pts;
/*
 * Choose the encoder time base: a positive -enc_time_base wins outright; a
 * negative one means "copy the input stream's time base" (with a warning
 * fallback when no input stream is available); otherwise use the supplied
 * per-codec-type default.
 * NOTE(review): return statements between the branches are elided here.
 */
3252 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3254     InputStream *ist = get_input_stream(ost);
3255     AVCodecContext *enc_ctx = ost->enc_ctx;
3256     AVFormatContext *oc;
/* Explicit user-provided time base takes priority. */
3258     if (ost->enc_timebase.num > 0) {
3259         enc_ctx->time_base = ost->enc_timebase;
/* Negative num = sentinel for "inherit from the input stream". */
3263     if (ost->enc_timebase.num < 0) {
3265             enc_ctx->time_base = ist->st->time_base;
3269         oc = output_files[ost->file_index]->ctx;
3270         av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3273     enc_ctx->time_base = default_time_base;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * disposition, frame rate selection, and per-media-type parameters pulled
 * from the buffersink of the attached filtergraph (format, rate, size,
 * SAR, channels), plus forced-keyframe setup for video.
 * NOTE(review): interior lines are elided in this excerpt.
 */
3276 static int init_output_stream_encode(OutputStream *ost)
3278     InputStream *ist = get_input_stream(ost);
3279     AVCodecContext *enc_ctx = ost->enc_ctx;
3280     AVCodecContext *dec_ctx = NULL;
3281     AVFormatContext *oc = output_files[ost->file_index]->ctx;
3284     set_encoder_id(output_files[ost->file_index], ost);
3286     // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3287     // hand, the legacy API makes demuxers set "rotate" metadata entries,
3288     // which have to be filtered out to prevent leaking them to output files.
3289     av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3292         ost->st->disposition = ist->st->disposition;
3294         dec_ctx = ist->dec_ctx;
3296         enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type, mark audio/video streams
 * as the default disposition. */
3298         for (j = 0; j < oc->nb_streams; j++) {
3299             AVStream *st = oc->streams[j];
3300             if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3303         if (j == oc->nb_streams)
3304             if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3305                 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3306                 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3309     if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
/* Frame rate priority: -r option > filtergraph > -framerate > input
 * r_frame_rate > hard-coded 25 fps fallback. */
3310         if (!ost->frame_rate.num)
3311             ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3312         if (ist && !ost->frame_rate.num)
3313             ost->frame_rate = ist->framerate;
3314         if (ist && !ost->frame_rate.num)
3315             ost->frame_rate = ist->st->r_frame_rate;
3316         if (ist && !ost->frame_rate.num) {
3317             ost->frame_rate = (AVRational){25, 1};
3318             av_log(NULL, AV_LOG_WARNING,
3320                    "about the input framerate is available. Falling "
3321                    "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3322                    "if you want a different framerate.\n",
3323                    ost->file_index, ost->index);
3325 //            ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3326         if (ost->enc->supported_framerates && !ost->force_fps) {
3327             int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3328             ost->frame_rate = ost->enc->supported_framerates[idx];
3330         // reduce frame rate for mpeg4 to be within the spec limits
3331         if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3332             av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3333                       ost->frame_rate.num, ost->frame_rate.den, 65535);
3337     switch (enc_ctx->codec_type) {
3338     case AVMEDIA_TYPE_AUDIO:
/* Audio parameters come from the filtergraph's buffersink. */
3339         enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
3341             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3342                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3343         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
3344         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3345         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
3347         init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3350     case AVMEDIA_TYPE_VIDEO:
3351         init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3353         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3354             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3355         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3356            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3357             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3358                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale the pre-parsed forced-keyframe times into the final time base. */
3360         for (j = 0; j < ost->forced_kf_count; j++)
3361             ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3363                                                  enc_ctx->time_base);
3365         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
3366         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3367         enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3368             ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3369             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3370             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3372         enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3374             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3375                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3377         enc_ctx->framerate = ost->frame_rate;
3379         ost->st->avg_frame_rate = ost->frame_rate;
/* Any geometry/format change relative to the decoder invalidates the
 * inherited raw sample depth; use the user-specified value instead. */
3382             enc_ctx->width   != dec_ctx->width  ||
3383             enc_ctx->height  != dec_ctx->height ||
3384             enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3385             enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3388         if (ost->forced_keyframes) {
3389             if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3390                 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3391                                     forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3393                     av_log(NULL, AV_LOG_ERROR,
3394                            "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3397                 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3398                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3399                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3400                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3402             // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3403             // parse it only for static kf timings
3404             } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3405                 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3409     case AVMEDIA_TYPE_SUBTITLE:
3410         enc_ctx->time_base = AV_TIME_BASE_Q;
/* Text subtitles have no intrinsic size; borrow the source video geometry. */
3411         if (!enc_ctx->width) {
3412             enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
3413             enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
3416     case AVMEDIA_TYPE_DATA:
3423     ost->mux_timebase = enc_ctx->time_base;
3428 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3432 if (ost->encoding_needed) {
3433 AVCodec *codec = ost->enc;
3434 AVCodecContext *dec = NULL;
3437 ret = init_output_stream_encode(ost);
3441 if ((ist = get_input_stream(ost)))
3443 if (dec && dec->subtitle_header) {
3444 /* ASS code assumes this buffer is null terminated so add extra byte. */
3445 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3446 if (!ost->enc_ctx->subtitle_header)
3447 return AVERROR(ENOMEM);
3448 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3449 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3451 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3452 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3453 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3455 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3456 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3457 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3459 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3460 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3461 av_buffersink_get_format(ost->filter->filter)) {
3462 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3463 if (!ost->enc_ctx->hw_frames_ctx)
3464 return AVERROR(ENOMEM);
3466 ret = hw_device_setup_for_encode(ost);
3468 snprintf(error, error_len, "Device setup failed for "
3469 "encoder on output stream #%d:%d : %s",
3470 ost->file_index, ost->index, av_err2str(ret));
3475 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3476 if (ret == AVERROR_EXPERIMENTAL)
3477 abort_codec_experimental(codec, 1);
3478 snprintf(error, error_len,
3479 "Error while opening encoder for output stream #%d:%d - "
3480 "maybe incorrect parameters such as bit_rate, rate, width or height",
3481 ost->file_index, ost->index);
3484 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3485 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3486 av_buffersink_set_frame_size(ost->filter->filter,
3487 ost->enc_ctx->frame_size);
3488 assert_avoptions(ost->encoder_opts);
3489 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3490 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3491 " It takes bits/s as argument, not kbits/s\n");
3493 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3495 av_log(NULL, AV_LOG_FATAL,
3496 "Error initializing the output stream codec context.\n");
3500 * FIXME: ost->st->codec should't be needed here anymore.
3502 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3506 if (ost->enc_ctx->nb_coded_side_data) {
3509 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3510 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3513 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3515 return AVERROR(ENOMEM);
3516 memcpy(dst_data, sd_src->data, sd_src->size);
3521 * Add global input side data. For now this is naive, and copies it
3522 * from the input stream's global side data. All side data should
3523 * really be funneled over AVFrame and libavfilter, then added back to
3524 * packet side data, and then potentially using the first packet for
3529 for (i = 0; i < ist->st->nb_side_data; i++) {
3530 AVPacketSideData *sd = &ist->st->side_data[i];
3531 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3533 return AVERROR(ENOMEM);
3534 memcpy(dst, sd->data, sd->size);
3535 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3536 av_display_rotation_set((uint32_t *)dst, 0);
3540 // copy timebase while removing common factors
3541 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3542 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3544 // copy estimated duration as a hint to the muxer
3545 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3546 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3548 ost->st->codec->codec= ost->enc_ctx->codec;
3549 } else if (ost->stream_copy) {
3550 ret = init_output_stream_streamcopy(ost);
3555 * FIXME: will the codec context used by the parser during streamcopy
3556 * This should go away with the new parser API.
3558 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3563 // parse user provided disposition, and update stream values
3564 if (ost->disposition) {
3565 static const AVOption opts[] = {
3566 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3567 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3568 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3569 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3570 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3571 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3572 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3573 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3574 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3575 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3576 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3577 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3578 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3579 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3582 static const AVClass class = {
3584 .item_name = av_default_item_name,
3586 .version = LIBAVUTIL_VERSION_INT,
3588 const AVClass *pclass = &class;
3590 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3595 /* initialize bitstream filters for the output stream
3596 * needs to be done here, because the codec id for streamcopy is not
3597 * known until now */
3598 ret = init_output_bsfs(ost);
3602 ost->initialized = 1;
3604 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, once per stream index, when a packet arrives for a stream that
 * appeared dynamically after the input file was opened. Such streams are
 * ignored by the transcoder; file->nb_streams_warn tracks the highest
 * index already reported so the warning is not repeated. */
3611 static void report_new_stream(int input_index, AVPacket *pkt)
3613     InputFile *file = input_files[input_index];
3614     AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index — nothing to do. */
3616     if (pkt->stream_index < file->nb_streams_warn)
3618     av_log(file->ctx, AV_LOG_WARNING,
3619            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3620            av_get_media_type_string(st->codecpar->codec_type),
3621            input_index, pkt->stream_index,
3622            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the new high-water mark so this stream is only reported once. */
3623     file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop:
 *  - binds complex-filtergraph outputs without an explicit map to the
 *    input stream feeding their (single-input) graph,
 *  - initializes framerate emulation start times,
 *  - initializes all input streams (opening decoders as needed),
 *  - initializes output streams not fed from a filtergraph (those are
 *    deferred until the first frame is available),
 *  - discards programs whose streams are all unused,
 *  - writes headers for outputs that legitimately have no streams,
 *  - logs the final input->output stream mapping.
 * Returns 0 on success or a negative AVERROR code. */
3626 static int transcode_init(void)
3628     int ret = 0, i, j, k;
3629     AVFormatContext *oc;
3632     char error[1024] = {0};
/* Give each complex-filtergraph output stream a source_index by walking
 * back from the graph's single input to the matching input stream. */
3634     for (i = 0; i < nb_filtergraphs; i++) {
3635         FilterGraph *fg = filtergraphs[i];
3636         for (j = 0; j < fg->nb_outputs; j++) {
3637             OutputFilter *ofilter = fg->outputs[j];
3638             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3640             if (fg->nb_inputs != 1)
3642             for (k = nb_input_streams-1; k >= 0 ; k--)
3643                 if (fg->inputs[0]->ist == input_streams[k])
3645             ofilter->ost->source_index = k;
3649     /* init framerate emulation */
3650     for (i = 0; i < nb_input_files; i++) {
3651         InputFile *ifile = input_files[i];
3652         if (ifile->rate_emu)
3653             for (j = 0; j < ifile->nb_streams; j++)
3654                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3657     /* init input streams */
3658     for (i = 0; i < nb_input_streams; i++)
3659         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts before bailing out. */
3660             for (i = 0; i < nb_output_streams; i++) {
3661                 ost = output_streams[i];
3662                 avcodec_close(ost->enc_ctx);
3667     /* open each encoder */
3668     for (i = 0; i < nb_output_streams; i++) {
3669         // skip streams fed from filtergraphs until we have a frame for them
3670         if (output_streams[i]->filter)
3673         ret = init_output_stream(output_streams[i], error, sizeof(error));
/* Discard every program in which no stream is actually used. */
3678     /* discard unused programs */
3679     for (i = 0; i < nb_input_files; i++) {
3680         InputFile *ifile = input_files[i];
3681         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3682             AVProgram *p = ifile->ctx->programs[j];
3683             int discard  = AVDISCARD_ALL;
3685             for (k = 0; k < p->nb_stream_indexes; k++)
3686                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3687                     discard = AVDISCARD_DEFAULT;
3690             p->discard = discard;
3694     /* write headers for files with no streams */
3695     for (i = 0; i < nb_output_files; i++) {
3696         oc = output_files[i]->ctx;
3697         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3698             ret = check_init_output_file(output_files[i], i);
3705     /* dump the stream mapping */
3706     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3707     for (i = 0; i < nb_input_streams; i++) {
3708         ist = input_streams[i];
3710         for (j = 0; j < ist->nb_filters; j++) {
3711             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3712                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3713                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3714                        ist->filters[j]->name);
3715                 if (nb_filtergraphs > 1)
3716                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3717                 av_log(NULL, AV_LOG_INFO, "\n");
3722     for (i = 0; i < nb_output_streams; i++) {
3723         ost = output_streams[i];
3725         if (ost->attachment_filename) {
3726             /* an attached file */
3727             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3728                    ost->attachment_filename, ost->file_index, ost->index);
3732         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3733             /* output from a complex graph */
3734             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3735             if (nb_filtergraphs > 1)
3736                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3738             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3739                    ost->index, ost->enc ? ost->enc->name : "?");
3743         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3744                input_streams[ost->source_index]->file_index,
3745                input_streams[ost->source_index]->st->index,
3748         if (ost->sync_ist != input_streams[ost->source_index])
3749             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3750                    ost->sync_ist->file_index,
3751                    ost->sync_ist->st->index);
3752         if (ost->stream_copy)
3753             av_log(NULL, AV_LOG_INFO, " (copy)");
/* For transcoding, print "codec (decoder) -> codec (encoder)"; "native"
 * stands in when the implementation name equals the codec name. */
3755             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3756             const AVCodec *out_codec   = ost->enc;
3757             const char *decoder_name   = "?";
3758             const char *in_codec_name  = "?";
3759             const char *encoder_name   = "?";
3760             const char *out_codec_name = "?";
3761             const AVCodecDescriptor *desc;
3764                 decoder_name  = in_codec->name;
3765                 desc = avcodec_descriptor_get(in_codec->id);
3767                     in_codec_name = desc->name;
3768                 if (!strcmp(decoder_name, in_codec_name))
3769                     decoder_name = "native";
3773                 encoder_name   = out_codec->name;
3774                 desc = avcodec_descriptor_get(out_codec->id);
3776                     out_codec_name = desc->name;
3777                 if (!strcmp(encoder_name, out_codec_name))
3778                     encoder_name = "native";
3781             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3782                    in_codec_name, decoder_name,
3783                    out_codec_name, encoder_name);
3785         av_log(NULL, AV_LOG_INFO, "\n");
3789         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Mark init as complete for the signal/report machinery. */
3793     atomic_store(&transcode_init_done, 1);
3798 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Return 1 if at least one output stream still wants data, 0 otherwise.
 * A stream no longer wants data when it is finished, when its file has
 * reached limit_filesize, or when it hit max_frames (in which case every
 * stream of that output file is closed). */
3799 static int need_output(void)
3803     for (i = 0; i < nb_output_streams; i++) {
3804         OutputStream *ost    = output_streams[i];
3805         OutputFile *of       = output_files[ost->file_index];
3806         AVFormatContext *os  = output_files[ost->file_index]->ctx;
3808         if (ost->finished ||
3809             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* max_frames reached: close all streams of this output file. */
3811         if (ost->frame_number >= ost->max_frames) {
3813             for (j = 0; j < of->ctx->nb_streams; j++)
3814                 close_output_stream(output_streams[of->ost_index + j]);
3825 * Select the output stream to process.
3827 * @return selected output stream, or NULL if none available
/* Select the next output stream to process: the unfinished stream whose
 * current DTS (rescaled to a common time base) is smallest, so outputs
 * advance roughly in lockstep. A stream with no DTS yet sorts first
 * (INT64_MIN). Returns NULL when no stream is available. */
3829 static OutputStream *choose_output(void)
3832     int64_t opts_min = INT64_MAX;
3833     OutputStream *ost_min = NULL;
3835     for (i = 0; i < nb_output_streams; i++) {
3836         OutputStream *ost = output_streams[i];
3837         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3838                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3840         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3841             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Skip streams that are neither initialized nor done with their inputs. */
3843         if (!ost->initialized && !ost->inputs_done)
3846         if (!ost->finished && opts < opts_min) {
/* An "unavailable" stream wins the min but yields NULL (try again later). */
3848             ost_min  = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios; silently
 * does nothing when stdin is not a tty (tcgetattr fails). */
3854 static void set_tty_echo(int on)
3858     if (tcgetattr(0, &tty) == 0) {
3859         if (on) tty.c_lflag |= ECHO;
3860         else    tty.c_lflag &= ~ECHO;
3861         tcsetattr(0, TCSANOW, &tty);
/* Poll for interactive single-key commands on stdin (at most every 100ms
 * when not running as a daemon). Handles quit, verbosity changes, QP
 * histogram toggling, packet dumping, sending/queuing filter commands
 * ('c'/'C'), debug-mode cycling ('d'/'D') and help ('?').
 * Returns AVERROR_EXIT to request termination, 0 otherwise. */
3866 static int check_keyboard_interaction(int64_t cur_time)
3869     static int64_t last_time;
3870     if (received_nb_signals)
3871         return AVERROR_EXIT;
3872     /* read_key() returns 0 on EOF */
3873     if(cur_time - last_time >= 100000 && !run_as_daemon){
3875         last_time = cur_time;
3879         return AVERROR_EXIT;
3880     if (key == '+') av_log_set_level(av_log_get_level()+10);
3881     if (key == '-') av_log_set_level(av_log_get_level()-10);
3882     if (key == 's') qp_hist     ^= 1;
3885             do_hex_dump = do_pkt_dump = 0;
3886         } else if(do_pkt_dump){
3890         av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<arg>]" line from the user
 * and send ('c', immediate, first matching filter) or queue ('C', all
 * matching filters) it on every filtergraph. */
3892     if (key == 'c' || key == 'C'){
3893         char buf[4096], target[64], command[256], arg[256] = {0};
3896         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3899         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3904         fprintf(stderr, "\n");
3906             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3907             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3908                    target, time, command, arg);
3909             for (i = 0; i < nb_filtergraphs; i++) {
3910                 FilterGraph *fg = filtergraphs[i];
3913                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3914                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3915                         fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3916                     } else if (key == 'c') {
3917                         fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3918                         ret = AVERROR_PATCHWELCOME;
3920                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3922                             fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3927             av_log(NULL, AV_LOG_ERROR,
3928                    "Parse error, at least 3 arguments were expected, "
3929                    "only %d given in string '%s'\n", n, buf);
/* 'd': cycle through supported debug flag values; 'D' prompts for an
 * explicit numeric debug value. The value is applied to all input
 * decoder contexts and all output encoder contexts. */
3932     if (key == 'd' || key == 'D'){
3935             debug = input_streams[0]->st->codec->debug<<1;
3936             if(!debug) debug = 1;
3937             while(debug & (FF_DEBUG_DCT_COEFF
3939                                              |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3941                     )) //unsupported, would just crash
3948             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3953             fprintf(stderr, "\n");
3954             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3955                 fprintf(stderr,"error parsing debug value\n");
3957         for(i=0;i<nb_input_streams;i++) {
3958             input_streams[i]->st->codec->debug = debug;
3960         for(i=0;i<nb_output_streams;i++) {
3961             OutputStream *ost = output_streams[i];
3962             ost->enc_ctx->debug = debug;
3964         if(debug) av_log_set_level(AV_LOG_DEBUG);
3965         fprintf(stderr,"debug=%d\n", debug);
3968         fprintf(stderr, "key    function\n"
3969                         "?      show this help\n"
3970                         "+      increase verbosity\n"
3971                         "-      decrease verbosity\n"
3972                         "c      Send command to first matching filter supporting it\n"
3973                         "C      Send/Queue command to all matching filters\n"
3974                         "D      cycle through available debug modes\n"
3975                         "h      dump packets/hex press to cycle through the 3 states\n"
3977                         "s      Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. In
 * non-blocking mode a full queue triggers a one-time warning suggesting a
 * larger thread_queue_size, then a blocking retry. On read or send error
 * the error is propagated to the receiving side of the queue. */
3984 static void *input_thread(void *arg)
3987     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3992         ret = av_read_frame(f->ctx, &pkt);
3994         if (ret == AVERROR(EAGAIN)) {
/* Propagate read errors (including EOF) to the consumer side. */
3999             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4002         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4003         if (flags && ret == AVERROR(EAGAIN)) {
/* Queue full in non-blocking mode: retry (blocking) and warn once. */
4005             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4006             av_log(f->ctx, AV_LOG_WARNING,
4007                    "Thread message queue blocking; consider raising the "
4008                    "thread_queue_size option (current value: %d)\n",
4009                    f->thread_queue_size);
4012             if (ret != AVERROR_EOF)
4013                 av_log(f->ctx, AV_LOG_ERROR,
4014                        "Unable to send packet to main thread: %s\n",
/* Packet ownership was not transferred on failure — release it here. */
4016             av_packet_unref(&pkt);
4017             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reap all input demuxer threads: signal EOF on the send
 * side of each queue, drain and unref any queued packets, join the
 * thread, then free the queue itself. Safe to call when no thread/queue
 * was ever created for a file. */
4025 static void free_input_threads(void)
4029     for (i = 0; i < nb_input_files; i++) {
4030         InputFile *f = input_files[i];
4033         if (!f || !f->in_thread_queue)
4035         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain remaining packets so their buffers are released. */
4036         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4037             av_packet_unref(&pkt);
4039         pthread_join(f->thread, NULL);
4041         av_thread_message_queue_free(&f->in_thread_queue);
/* Create one demuxer thread plus message queue per input file. Skipped
 * entirely when there is only a single input (threading not needed).
 * Non-seekable inputs (pipes etc., except lavfi) are read in non-blocking
 * mode. Returns 0 on success or a negative AVERROR code. */
4045 static int init_input_threads(void)
4049     if (nb_input_files == 1)
4052     for (i = 0; i < nb_input_files; i++) {
4053         InputFile *f = input_files[i];
/* Non-seekable real inputs must not block the reader thread. */
4055         if (f->ctx->pb ? !f->ctx->pb->seekable :
4056             strcmp(f->ctx->iformat->name, "lavfi"))
4057             f->non_blocking = 1;
4058         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4059                                             f->thread_queue_size, sizeof(AVPacket));
4063         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4064             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4065             av_thread_message_queue_free(&f->in_thread_queue);
4066             return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue,
 * non-blocking when the file was flagged as such. */
4072 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4074     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4076                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file. With rate emulation (-re) the
 * read is throttled: EAGAIN is returned while any stream's DTS is still
 * ahead of wall-clock time since its start. With multiple inputs the
 * packet comes from the per-file demuxer thread, otherwise straight from
 * av_read_frame(). */
4080 static int get_input_packet(InputFile *f, AVPacket *pkt)
4084         for (i = 0; i < f->nb_streams; i++) {
4085             InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (in microseconds) against elapsed real time. */
4086             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4087             int64_t now = av_gettime_relative() - ist->start;
4089                 return AVERROR(EAGAIN);
4094     if (nb_input_files > 1)
4095         return get_input_packet_mt(f, pkt);
4097     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input produced EAGAIN this iteration). */
4100 static int got_eagain(void)
4103     for (i = 0; i < nb_output_streams; i++)
4104         if (output_streams[i]->unavailable)
/* Clear all per-file eagain and per-stream unavailable flags so the next
 * loop iteration retries every input/output. */
4109 static void reset_eagain(void)
4112     for (i = 0; i < nb_input_files; i++)
4113         input_files[i]->eagain = 0;
4114     for (i = 0; i < nb_output_streams; i++)
4115         output_streams[i]->unavailable = 0;
4118 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Set *duration to max(tmp, *duration), comparing across the two time
 * bases with av_compare_ts(); returns the time base that the resulting
 * *duration is expressed in. */
4119 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4120                                AVRational time_base)
4126         return tmp_time_base;
4129     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4132         return tmp_time_base;
/* Seek the input file back to its start for -stream_loop: flush each
 * active decoder, then update ifile->duration/time_base with the length
 * of the just-finished pass so looped timestamps keep increasing.
 * When any audio stream is present, video last-frame duration is ignored
 * (it is not exactly defined). Returns 0 or a negative AVERROR code. */
4138 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4141     AVCodecContext *avctx;
4142     int i, ret, has_audio = 0;
4143     int64_t duration = 0;
4145     ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: drain/flush decoders and detect decoded audio. */
4149     for (i = 0; i < ifile->nb_streams; i++) {
4150         ist   = input_streams[ifile->ist_index + i];
4151         avctx = ist->dec_ctx;
4154         if (ist->decoding_needed) {
4155             process_input_packet(ist, NULL, 1);
4156             avcodec_flush_buffers(avctx);
4159         /* duration is the length of the last frame in a stream
4160          * when audio stream is present we don't care about
4161          * last video frame length because it's not defined exactly */
4162         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute each stream's total duration and keep the max. */
4166     for (i = 0; i < ifile->nb_streams; i++) {
4167         ist   = input_streams[ifile->ist_index + i];
4168         avctx = ist->dec_ctx;
4171             if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4172                 AVRational sample_rate = {1, avctx->sample_rate};
4174                 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Last video frame length: from forced framerate, else the average
 * frame rate, else fall back to 1 time-base unit. */
4178                 if (ist->framerate.num) {
4179                     duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4180                 } else if (ist->st->avg_frame_rate.num) {
4181                     duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4182                 } else duration = 1;
4184         if (!ifile->duration)
4185             ifile->time_base = ist->st->time_base;
4186         /* the total duration of the stream, max_pts - min_pts is
4187          * the duration of the stream without the last frame */
4188         duration += ist->max_pts - ist->min_pts;
4189         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts tick down by one per completed pass. */
4193     if (ifile->loop > 0)
4201 * - 0 -- one packet was read and processed
4202 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4203 * this function should be called again
4204 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file.
 * Returns 0 when a packet was consumed, AVERROR(EAGAIN) when none was
 * available (caller should retry), AVERROR_EOF when the file is done.
 * Handles looping (-stream_loop), flushing decoders at EOF, timestamp
 * wrap correction, ts_offset/ts_scale application, first-packet side
 * data injection, and discontinuity detection/repair. */
4206 static int process_input(int file_index)
4208     InputFile *ifile = input_files[file_index];
4209     AVFormatContext *is;
4217     ret = get_input_packet(ifile, &pkt);
4219     if (ret == AVERROR(EAGAIN)) {
/* On EOF with looping enabled, seek back to the start and retry once. */
4223     if (ret < 0 && ifile->loop) {
4224         if ((ret = seek_to_start(ifile, is)) < 0)
4226         ret = get_input_packet(ifile, &pkt);
4227         if (ret == AVERROR(EAGAIN)) {
4233         if (ret != AVERROR_EOF) {
4234             print_error(is->filename, ret);
/* EOF: flush every active decoder on this file... */
4239         for (i = 0; i < ifile->nb_streams; i++) {
4240             ist = input_streams[ifile->ist_index + i];
4241             if (ist->decoding_needed) {
4242                 ret = process_input_packet(ist, NULL, 0);
4247             /* mark all outputs that don't go through lavfi as finished */
4248             for (j = 0; j < nb_output_streams; j++) {
4249                 OutputStream *ost = output_streams[j];
4251                 if (ost->source_index == ifile->ist_index + i &&
4252                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4253                     finish_output_stream(ost);
/* ...and report EAGAIN so other inputs are still drained. */
4257         ifile->eof_reached = 1;
4258         return AVERROR(EAGAIN);
4264         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4265                          is->streams[pkt.stream_index]);
4267     /* the following test is needed in case new streams appear
4268        dynamically in stream : we ignore them */
4269     if (pkt.stream_index >= ifile->nb_streams) {
4270         report_new_stream(file_index, &pkt);
4271         goto discard_packet;
4274     ist = input_streams[ifile->ist_index + pkt.stream_index];
4276     ist->data_size += pkt.size;
4280         goto discard_packet;
4282     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4283         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4288         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4289                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4290                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4291                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4292                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4293                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4294                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4295                av_ts2str(input_files[ist->file_index]->ts_offset),
4296                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with <64 pts_wrap_bits. */
4299     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4300         int64_t stime, stime2;
4301         // Correcting starttime based on the enabled streams
4302         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4303         //       so we instead do it here as part of discontinuity handling
4304         if (   ist->next_dts == AV_NOPTS_VALUE
4305             && ifile->ts_offset == -is->start_time
4306             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4307             int64_t new_start_time = INT64_MAX;
4308             for (i=0; i<is->nb_streams; i++) {
4309                 AVStream *st = is->streams[i];
4310                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4312                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4314             if (new_start_time > is->start_time) {
4315                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4316                 ifile->ts_offset = -new_start_time;
/* If a timestamp lies more than half a wrap period past start, assume
 * it wrapped and subtract one full wrap period. */
4320         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4321         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4322         ist->wrap_correction_done = 1;
4324         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4325             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4326             ist->wrap_correction_done = 0;
4328         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4329             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4330             ist->wrap_correction_done = 0;
4334     /* add the stream-global side data to the first packet */
4335     if (ist->nb_packets == 1) {
4336         for (i = 0; i < ist->st->nb_side_data; i++) {
4337             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled separately (autorotate); skip it here. */
4340             if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4343             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4346             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4350             memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file timestamp offset, then the per-stream ts_scale. */
4354     if (pkt.dts != AV_NOPTS_VALUE)
4355         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4356     if (pkt.pts != AV_NOPTS_VALUE)
4357         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4359     if (pkt.pts != AV_NOPTS_VALUE)
4360         pkt.pts *= ist->ts_scale;
4361     if (pkt.dts != AV_NOPTS_VALUE)
4362         pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity: first DTS of this stream vs. the file's
 * last seen timestamp (only for TS_DISCONT formats, not with -copyts). */
4364     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4365     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4366          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4367         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4368         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4369         int64_t delta   = pkt_dts - ifile->last_ts;
4370         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4371             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
4372             ifile->ts_offset -= delta;
4373             av_log(NULL, AV_LOG_DEBUG,
4374                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4375                    delta, ifile->ts_offset);
4376             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4377             if (pkt.pts != AV_NOPTS_VALUE)
4378                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* When looping, shift timestamps by the accumulated file duration and
 * track min/max pts for the next pass's duration estimate. */
4382         duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4383         if (pkt.pts != AV_NOPTS_VALUE) {
4384             pkt.pts += duration;
4385             ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4386             ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4389         if (pkt.dts != AV_NOPTS_VALUE)
4390             pkt.dts += duration;
/* Intra-stream discontinuity: packet DTS vs. the predicted next_dts.
 * TS_DISCONT formats get the offset repaired; otherwise out-of-range
 * timestamps are dropped (set to AV_NOPTS_VALUE). */
4392     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4393     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4394          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4395         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4397         int64_t delta   = pkt_dts - ist->next_dts;
4398         if (is->iformat->flags & AVFMT_TS_DISCONT) {
4399             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4400                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
4401                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4402                 ifile->ts_offset -= delta;
4403                 av_log(NULL, AV_LOG_DEBUG,
4404                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4405                        delta, ifile->ts_offset);
4406                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4407                 if (pkt.pts != AV_NOPTS_VALUE)
4408                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4411             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4412                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4413                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4414                 pkt.dts = AV_NOPTS_VALUE;
4416             if (pkt.pts != AV_NOPTS_VALUE){
4417                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4418                 delta   = pkt_pts - ist->next_dts;
4419                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4420                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
4421                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4422                     pkt.pts = AV_NOPTS_VALUE;
4428     if (pkt.dts != AV_NOPTS_VALUE)
4429         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4432         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4433                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4434                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4435                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4436                av_ts2str(input_files[ist->file_index]->ts_offset),
4437                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4440     sub2video_heartbeat(ist, pkt.pts);
4442     process_input_packet(ist, &pkt, 0);
4445     av_packet_unref(&pkt);
4451 * Perform a step of transcoding for the specified filter graph.
4453 * @param[in] graph filter graph to consider
4454 * @param[out] best_ist input stream where a frame would allow to continue
4455 * @return 0 for success, <0 for error
/* Perform one step on a configured filter graph: request the oldest
 * pending frame. On success, reap produced frames; on EOF, reap with
 * flushing and close every graph output; on EAGAIN, report (via
 * *best_ist) the live input whose buffersrc has the most failed
 * requests — i.e. where a new frame would best unblock the graph.
 * If no input can make progress, all graph outputs are marked
 * unavailable. Returns 0 for success, <0 for error. */
4457 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4460     int nb_requests, nb_requests_max = 0;
4461     InputFilter *ifilter;
4465     ret = avfilter_graph_request_oldest(graph->graph);
4467         return reap_filters(0);
4469     if (ret == AVERROR_EOF) {
4470         ret = reap_filters(1);
4471         for (i = 0; i < graph->nb_outputs; i++)
4472             close_output_stream(graph->outputs[i]->ost);
4475     if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the hungriest input that is neither stalled nor at EOF. */
4478     for (i = 0; i < graph->nb_inputs; i++) {
4479         ifilter = graph->inputs[i];
4481         if (input_files[ist->file_index]->eagain ||
4482             input_files[ist->file_index]->eof_reached)
4484         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4485         if (nb_requests > nb_requests_max) {
4486             nb_requests_max = nb_requests;
/* No input can feed the graph right now — park all its outputs. */
4492         for (i = 0; i < graph->nb_outputs; i++)
4493             graph->outputs[i]->ost->unavailable = 1;
4499 * Run a single step of transcoding.
4501 * @return 0 for success, <0 for error
/* Run a single step of transcoding: choose the most-behind output
 * stream, (re)configure its filter graph once all input formats are
 * known, pick the input stream to read from, feed it one packet, and
 * reap any frames the filters produced.
 * Returns 0 for success, <0 for error. */
4503 static int transcode_step(void)
4506     InputStream  *ist = NULL;
4509     ost = choose_output();
4516             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not configured yet: configure once every input
 * filter knows its format, otherwise fall through and read more input. */
4520     if (ost->filter && !ost->filter->graph->graph) {
4521         if (ifilter_has_all_input_formats(ost->filter->graph)) {
4522             ret = configure_filtergraph(ost->filter->graph);
4524                 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4530     if (ost->filter && ost->filter->graph->graph) {
4531         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4535     } else if (ost->filter) {
/* Unconfigured graph: find an input that still owes us a frame. */
4537         for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4538             InputFilter *ifilter = ost->filter->graph->inputs[i];
4539             if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* Every input is done but the graph never configured — give up on it. */
4545             ost->inputs_done = 1;
4549         av_assert0(ost->source_index >= 0);
4550         ist = input_streams[ost->source_index];
4553     ret = process_input(ist->file_index);
4554     if (ret == AVERROR(EAGAIN)) {
4555         if (input_files[ist->file_index]->eagain)
4556             ost->unavailable = 1;
4561         return ret == AVERROR_EOF ? 0 : ret;
4563     return reap_filters(0);
4567 * The following code is the main loop of the file converter
/* Main driver of the converter: runs transcode_init(), then loops over
 * transcode_step() until no output wants more data, a signal arrives or
 * an error occurs; finally flushes decoders, writes trailers, closes
 * encoders/decoders and frees per-stream resources.
 * Returns 0 on success or a negative AVERROR code. */
4569 static int transcode(void)
4572     AVFormatContext *os;
4575     int64_t timer_start;
4576     int64_t total_packets_written = 0;
4578     ret = transcode_init();
4582     if (stdin_interaction) {
4583         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4586     timer_start = av_gettime_relative();
4589     if ((ret = init_input_threads()) < 0)
/* Main loop: one transcode step per iteration until stopped. */
4593     while (!received_sigterm) {
4594         int64_t cur_time= av_gettime_relative();
4596         /* if 'q' pressed, exits */
4597         if (stdin_interaction)
4598             if (check_keyboard_interaction(cur_time) < 0)
4601         /* check if there's any stream where output is still needed */
4602         if (!need_output()) {
4603             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4607         ret = transcode_step();
4608         if (ret < 0 && ret != AVERROR_EOF) {
4610             av_strerror(ret, errbuf, sizeof(errbuf));
4612             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4616         /* dump report by using the output first video and audio streams */
4617         print_report(0, timer_start, cur_time);
4620     free_input_threads();
4623     /* at the end of stream, we must flush the decoder buffers */
4624     for (i = 0; i < nb_input_streams; i++) {
4625         ist = input_streams[i];
4626         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4627             process_input_packet(ist, NULL, 0);
4634     /* write the trailer if needed and close file */
4635     for (i = 0; i < nb_output_files; i++) {
4636         os = output_files[i]->ctx;
4637         if (!output_files[i]->header_written) {
4638             av_log(NULL, AV_LOG_ERROR,
4639                    "Nothing was written into output file %d (%s), because "
4640                    "at least one of its streams received no packets.\n",
4644         if ((ret = av_write_trailer(os)) < 0) {
4645             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4651     /* dump report by using the first video and audio streams */
4652     print_report(1, timer_start, av_gettime_relative());
4654     /* close each encoder */
4655     for (i = 0; i < nb_output_streams; i++) {
4656         ost = output_streams[i];
4657         if (ost->encoding_needed) {
4658             av_freep(&ost->enc_ctx->stats_in);
4660         total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever written. */
4663     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4664         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4668     /* close each decoder */
4669     for (i = 0; i < nb_input_streams; i++) {
4670         ist = input_streams[i];
4671         if (ist->decoding_needed) {
4672             avcodec_close(ist->dec_ctx);
4673             if (ist->hwaccel_uninit)
4674                 ist->hwaccel_uninit(ist->dec_ctx);
4678     av_buffer_unref(&hw_device_ctx);
4679     hw_device_free_all();
/* Cleanup path (also reached on error): release per-stream state. */
4686     free_input_threads();
4689     if (output_streams) {
4690         for (i = 0; i < nb_output_streams; i++) {
4691             ost = output_streams[i];
4694                 if (fclose(ost->logfile))
4695                     av_log(NULL, AV_LOG_ERROR,
4696                            "Error closing logfile, loss of information possible: %s\n",
4697                            av_err2str(AVERROR(errno)));
4698                 ost->logfile = NULL;
4700             av_freep(&ost->forced_kf_pts);
4701             av_freep(&ost->apad);
4702             av_freep(&ost->disposition);
4703             av_dict_free(&ost->encoder_opts);
4704             av_dict_free(&ost->sws_dict);
4705             av_dict_free(&ost->swr_opts);
4706             av_dict_free(&ost->resample_opts);
4714 static int64_t getutime(void)
4717 struct rusage rusage;
4719 getrusage(RUSAGE_SELF, &rusage);
4720 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4721 #elif HAVE_GETPROCESSTIMES
4723 FILETIME c, e, k, u;
4724 proc = GetCurrentProcess();
4725 GetProcessTimes(proc, &c, &e, &k, &u);
4726 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4728 return av_gettime_relative();
4732 static int64_t getmaxrss(void)
4734 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4735 struct rusage rusage;
4736 getrusage(RUSAGE_SELF, &rusage);
4737 return (int64_t)rusage.ru_maxrss * 1024;
4738 #elif HAVE_GETPROCESSMEMORYINFO
4740 PROCESS_MEMORY_COUNTERS memcounters;
4741 proc = GetCurrentProcess();
4742 memcounters.cb = sizeof(memcounters);
4743 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4744 return memcounters.PeakPagefileUsage;
/**
 * No-op log callback: installed (via av_log_set_callback) when the user
 * requests fully silent operation, discarding every log message.
 * The parameters mirror av_log()'s default callback signature and are
 * intentionally unused.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4754 int main(int argc, char **argv)
4761 register_exit(ffmpeg_cleanup);
4763 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4765 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4766 parse_loglevel(argc, argv, options);
4768 if(argc>1 && !strcmp(argv[1], "-d")){
4770 av_log_set_callback(log_callback_null);
4775 avcodec_register_all();
4777 avdevice_register_all();
4779 avfilter_register_all();
4781 avformat_network_init();
4783 show_banner(argc, argv, options);
4785 /* parse options and open all input/output files */
4786 ret = ffmpeg_parse_options(argc, argv);
4790 if (nb_output_files <= 0 && nb_input_files == 0) {
4792 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4796 /* file converter / grab */
4797 if (nb_output_files <= 0) {
4798 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4802 // if (nb_input_files == 0) {
4803 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4807 for (i = 0; i < nb_output_files; i++) {
4808 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4812 current_time = ti = getutime();
4813 if (transcode() < 0)
4815 ti = getutime() - ti;
4817 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4819 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4820 decode_error_stat[0], decode_error_stat[1]);
4821 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4824 exit_program(received_nb_signals ? 255 : main_return_code);
4825 return main_return_code;