2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/hwcontext.h"
51 #include "libavutil/internal.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/display.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
114 static FILE *vstats_file;
116 const char *const forced_keyframes_const_names[] = {
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
128 static int ifilter_has_all_input_formats(FilterGraph *fg);
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static unsigned dup_warning = 1000;
133 static int nb_frames_drop = 0;
134 static int64_t decode_error_stat[2];
136 static int want_sdp = 1;
138 static int current_time;
139 AVIOContext *progress_avio = NULL;
141 static uint8_t *subtitle_out;
143 InputStream **input_streams = NULL;
144 int nb_input_streams = 0;
145 InputFile **input_files = NULL;
146 int nb_input_files = 0;
148 OutputStream **output_streams = NULL;
149 int nb_output_streams = 0;
150 OutputFile **output_files = NULL;
151 int nb_output_files = 0;
153 FilterGraph **filtergraphs;
158 /* init terminal so that we can grab keys */
159 static struct termios oldtty;
160 static int restore_tty;
164 static void free_input_threads(void);
168 Convert subtitles to video with alpha to insert them in filter graphs.
169 This is a temporary solution until libavfilter gets real subtitles support.
172 static int sub2video_get_blank_frame(InputStream *ist)
175 AVFrame *frame = ist->sub2video.frame;
177 av_frame_unref(frame);
178 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
179 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
180 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
181 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
187 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
190 uint32_t *pal, *dst2;
194 if (r->type != SUBTITLE_BITMAP) {
195 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
198 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
200 r->x, r->y, r->w, r->h, w, h
205 dst += r->y * dst_linesize + r->x * 4;
207 pal = (uint32_t *)r->data[1];
208 for (y = 0; y < r->h; y++) {
209 dst2 = (uint32_t *)dst;
211 for (x = 0; x < r->w; x++)
212 *(dst2++) = pal[*(src2++)];
214 src += r->linesize[0];
218 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 AVFrame *frame = ist->sub2video.frame;
223 av_assert1(frame->data[0]);
224 ist->sub2video.last_pts = frame->pts = pts;
225 for (i = 0; i < ist->nb_filters; i++)
226 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
227 AV_BUFFERSRC_FLAG_KEEP_REF |
228 AV_BUFFERSRC_FLAG_PUSH);
231 void sub2video_update(InputStream *ist, AVSubtitle *sub)
233 AVFrame *frame = ist->sub2video.frame;
237 int64_t pts, end_pts;
242 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
243 AV_TIME_BASE_Q, ist->st->time_base);
244 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
245 AV_TIME_BASE_Q, ist->st->time_base);
246 num_rects = sub->num_rects;
248 pts = ist->sub2video.end_pts;
252 if (sub2video_get_blank_frame(ist) < 0) {
253 av_log(ist->dec_ctx, AV_LOG_ERROR,
254 "Impossible to get a blank canvas.\n");
257 dst = frame->data [0];
258 dst_linesize = frame->linesize[0];
259 for (i = 0; i < num_rects; i++)
260 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
261 sub2video_push_ref(ist, pts);
262 ist->sub2video.end_pts = end_pts;
265 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 InputFile *infile = input_files[ist->file_index];
271 /* When a frame is read from a file, examine all sub2video streams in
272 the same file and send the sub2video frame again. Otherwise, decoded
273 video frames could be accumulating in the filter graph while a filter
274 (possibly overlay) is desperately waiting for a subtitle frame. */
275 for (i = 0; i < infile->nb_streams; i++) {
276 InputStream *ist2 = input_streams[infile->ist_index + i];
277 if (!ist2->sub2video.frame)
279 /* subtitles seem to be usually muxed ahead of other streams;
280 if not, subtracting a larger time here is necessary */
281 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
282 /* do not send the heartbeat frame if the subtitle is already ahead */
283 if (pts2 <= ist2->sub2video.last_pts)
285 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
286 sub2video_update(ist2, NULL);
287 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
288 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290 sub2video_push_ref(ist2, pts2);
294 static void sub2video_flush(InputStream *ist)
298 if (ist->sub2video.end_pts < INT64_MAX)
299 sub2video_update(ist, NULL);
300 for (i = 0; i < ist->nb_filters; i++)
301 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
304 /* end of sub2video hack */
306 static void term_exit_sigsafe(void)
310 tcsetattr (0, TCSANOW, &oldtty);
316 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared with the signal handlers; volatile because
   they are written from async signal context and read from the main loop. */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
static volatile int transcode_init_done = 0;
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
327 sigterm_handler(int sig)
329 received_sigterm = sig;
330 received_nb_signals++;
332 if(received_nb_signals > 3) {
333 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
334 strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: map console events to the POSIX-style
 * signal handler. For close/logoff/shutdown the process is hard-killed as
 * soon as this callback returns, so we stall until the main thread has
 * finished cleaning up.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            av_usleep(10000);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
375 if (!run_as_daemon && stdin_interaction) {
377 if (tcgetattr (0, &tty) == 0) {
381 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
382 |INLCR|IGNCR|ICRNL|IXON);
383 tty.c_oflag |= OPOST;
384 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
385 tty.c_cflag &= ~(CSIZE|PARENB);
390 tcsetattr (0, TCSANOW, &tty);
392 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
396 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
397 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 signal(SIGXCPU, sigterm_handler);
401 #if HAVE_SETCONSOLECTRLHANDLER
402 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/**
 * Poll stdin for one keypress. Returns the character, -1 when no input is
 * available (or the pipe closed), or the short read count on error.
 * POSIX path uses select(); Windows path uses kbhit/PeekNamedPipe.
 */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;                      /* zero timeout: pure poll */
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if (nchars != 0) {
            read(0, &ch, 1);
            return ch;
        } else {
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
458 static int decode_interrupt_cb(void *ctx)
460 return received_nb_signals > transcode_init_done;
463 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
465 static void ffmpeg_cleanup(int ret)
470 int maxrss = getmaxrss() / 1024;
471 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
474 for (i = 0; i < nb_filtergraphs; i++) {
475 FilterGraph *fg = filtergraphs[i];
476 avfilter_graph_free(&fg->graph);
477 for (j = 0; j < fg->nb_inputs; j++) {
478 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
481 sizeof(frame), NULL);
482 av_frame_free(&frame);
484 av_fifo_free(fg->inputs[j]->frame_queue);
485 if (fg->inputs[j]->ist->sub2video.sub_queue) {
486 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
489 &sub, sizeof(sub), NULL);
490 avsubtitle_free(&sub);
492 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
494 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
495 av_freep(&fg->inputs[j]->name);
496 av_freep(&fg->inputs[j]);
498 av_freep(&fg->inputs);
499 for (j = 0; j < fg->nb_outputs; j++) {
500 av_freep(&fg->outputs[j]->name);
501 av_freep(&fg->outputs[j]->formats);
502 av_freep(&fg->outputs[j]->channel_layouts);
503 av_freep(&fg->outputs[j]->sample_rates);
504 av_freep(&fg->outputs[j]);
506 av_freep(&fg->outputs);
507 av_freep(&fg->graph_desc);
509 av_freep(&filtergraphs[i]);
511 av_freep(&filtergraphs);
513 av_freep(&subtitle_out);
516 for (i = 0; i < nb_output_files; i++) {
517 OutputFile *of = output_files[i];
522 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524 avformat_free_context(s);
525 av_dict_free(&of->opts);
527 av_freep(&output_files[i]);
529 for (i = 0; i < nb_output_streams; i++) {
530 OutputStream *ost = output_streams[i];
535 for (j = 0; j < ost->nb_bitstream_filters; j++)
536 av_bsf_free(&ost->bsf_ctx[j]);
537 av_freep(&ost->bsf_ctx);
538 av_freep(&ost->bsf_extradata_updated);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && transcode_init_done) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
640 static void abort_codec_experimental(AVCodec *c, int encoder)
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
677 if (!of->header_written) {
678 AVPacket tmp_pkt = {0};
679 /* the muxer is not initialized yet, buffer the packet */
680 if (!av_fifo_space(ost->muxing_queue)) {
681 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
682 ost->max_muxing_queue_size);
683 if (new_size <= av_fifo_size(ost->muxing_queue)) {
684 av_log(NULL, AV_LOG_ERROR,
685 "Too many packets buffered for output stream %d:%d.\n",
686 ost->file_index, ost->st->index);
689 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
693 ret = av_packet_ref(&tmp_pkt, pkt);
696 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
697 av_packet_unref(pkt);
701 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
702 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
703 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
706 * Audio encoders may split the packets -- #frames in != #packets out.
707 * But there is no reordering, so we can limit the number of output packets
708 * by simply dropping them here.
709 * Counting encoded video frames needs to be done separately because of
710 * reordering, see do_video_out()
712 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
713 if (ost->frame_number >= ost->max_frames) {
714 av_packet_unref(pkt);
719 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
721 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
723 ost->quality = sd ? AV_RL32(sd) : -1;
724 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
726 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
728 ost->error[i] = AV_RL64(sd + 8 + 8*i);
733 if (ost->frame_rate.num && ost->is_cfr) {
734 if (pkt->duration > 0)
735 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
736 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
741 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
743 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
744 if (pkt->dts != AV_NOPTS_VALUE &&
745 pkt->pts != AV_NOPTS_VALUE &&
746 pkt->dts > pkt->pts) {
747 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
749 ost->file_index, ost->st->index);
751 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
752 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
753 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
755 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
756 pkt->dts != AV_NOPTS_VALUE &&
757 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
758 ost->last_mux_dts != AV_NOPTS_VALUE) {
759 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
760 if (pkt->dts < max) {
761 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
762 av_log(s, loglevel, "Non-monotonous DTS in output stream "
763 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
764 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
766 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
769 av_log(s, loglevel, "changing to %"PRId64". This may result "
770 "in incorrect timestamps in the output file.\n",
772 if (pkt->pts >= pkt->dts)
773 pkt->pts = FFMAX(pkt->pts, max);
778 ost->last_mux_dts = pkt->dts;
780 ost->data_size += pkt->size;
781 ost->packets_written++;
783 pkt->stream_index = ost->index;
786 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
787 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
788 av_get_media_type_string(ost->enc_ctx->codec_type),
789 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
790 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
795 ret = av_interleaved_write_frame(s, pkt);
797 print_error("av_interleaved_write_frame()", ret);
798 main_return_code = 1;
799 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
801 av_packet_unref(pkt);
804 static void close_output_stream(OutputStream *ost)
806 OutputFile *of = output_files[ost->file_index];
808 ost->finished |= ENCODER_FINISHED;
810 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
811 of->recording_time = FFMIN(of->recording_time, end);
815 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
819 /* apply the output bitstream filters, if any */
820 if (ost->nb_bitstream_filters) {
823 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
829 /* get a packet from the previous filter up the chain */
830 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
831 if (ret == AVERROR(EAGAIN)) {
837 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
838 * the api states this shouldn't happen after init(). Propagate it here to the
839 * muxer and to the next filters in the chain to workaround this.
840 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
841 * par_out->extradata and adapt muxers accordingly to get rid of this. */
842 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
843 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
846 ost->bsf_extradata_updated[idx - 1] |= 1;
849 /* send it to the next filter down the chain or to the muxer */
850 if (idx < ost->nb_bitstream_filters) {
851 /* HACK/FIXME! - See above */
852 if (!(ost->bsf_extradata_updated[idx] & 2)) {
853 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
856 ost->bsf_extradata_updated[idx] |= 2;
858 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
863 write_packet(of, pkt, ost);
866 write_packet(of, pkt, ost);
869 if (ret < 0 && ret != AVERROR_EOF) {
870 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
871 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
877 static int check_recording_time(OutputStream *ost)
879 OutputFile *of = output_files[ost->file_index];
881 if (of->recording_time != INT64_MAX &&
882 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
883 AV_TIME_BASE_Q) >= 0) {
884 close_output_stream(ost);
890 static void do_audio_out(OutputFile *of, OutputStream *ost,
893 AVCodecContext *enc = ost->enc_ctx;
897 av_init_packet(&pkt);
901 if (!check_recording_time(ost))
904 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
905 frame->pts = ost->sync_opts;
906 ost->sync_opts = frame->pts + frame->nb_samples;
907 ost->samples_encoded += frame->nb_samples;
908 ost->frames_encoded++;
910 av_assert0(pkt.size || !pkt.data);
911 update_benchmark(NULL);
913 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
914 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
915 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
916 enc->time_base.num, enc->time_base.den);
919 ret = avcodec_send_frame(enc, frame);
924 ret = avcodec_receive_packet(enc, &pkt);
925 if (ret == AVERROR(EAGAIN))
930 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
932 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
935 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
936 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
937 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
938 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
941 output_packet(of, &pkt, ost);
946 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
950 static void do_subtitle_out(OutputFile *of,
954 int subtitle_out_max_size = 1024 * 1024;
955 int subtitle_out_size, nb, i;
960 if (sub->pts == AV_NOPTS_VALUE) {
961 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
970 subtitle_out = av_malloc(subtitle_out_max_size);
972 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
977 /* Note: DVB subtitle need one packet to draw them and one other
978 packet to clear them */
979 /* XXX: signal it in the codec context ? */
980 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
985 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
987 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
988 pts -= output_files[ost->file_index]->start_time;
989 for (i = 0; i < nb; i++) {
990 unsigned save_num_rects = sub->num_rects;
992 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
993 if (!check_recording_time(ost))
997 // start_display_time is required to be 0
998 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
999 sub->end_display_time -= sub->start_display_time;
1000 sub->start_display_time = 0;
1004 ost->frames_encoded++;
1006 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1007 subtitle_out_max_size, sub);
1009 sub->num_rects = save_num_rects;
1010 if (subtitle_out_size < 0) {
1011 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1015 av_init_packet(&pkt);
1016 pkt.data = subtitle_out;
1017 pkt.size = subtitle_out_size;
1018 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1019 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1020 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1021 /* XXX: the pts correction is handled here. Maybe handling
1022 it in the codec would be better */
1024 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1026 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029 output_packet(of, &pkt, ost);
1033 static void do_video_out(OutputFile *of,
1035 AVFrame *next_picture,
1038 int ret, format_video_sync;
1040 AVCodecContext *enc = ost->enc_ctx;
1041 AVCodecParameters *mux_par = ost->st->codecpar;
1042 AVRational frame_rate;
1043 int nb_frames, nb0_frames, i;
1044 double delta, delta0;
1045 double duration = 0;
1047 InputStream *ist = NULL;
1048 AVFilterContext *filter = ost->filter->filter;
1050 if (ost->source_index >= 0)
1051 ist = input_streams[ost->source_index];
1053 frame_rate = av_buffersink_get_frame_rate(filter);
1054 if (frame_rate.num > 0 && frame_rate.den > 0)
1055 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1057 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1058 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1060 if (!ost->filters_script &&
1064 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1065 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1068 if (!next_picture) {
1070 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1071 ost->last_nb0_frames[1],
1072 ost->last_nb0_frames[2]);
1074 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1075 delta = delta0 + duration;
1077 /* by default, we output a single frame */
1078 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1081 format_video_sync = video_sync_method;
1082 if (format_video_sync == VSYNC_AUTO) {
1083 if(!strcmp(of->ctx->oformat->name, "avi")) {
1084 format_video_sync = VSYNC_VFR;
1086 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1088 && format_video_sync == VSYNC_CFR
1089 && input_files[ist->file_index]->ctx->nb_streams == 1
1090 && input_files[ist->file_index]->input_ts_offset == 0) {
1091 format_video_sync = VSYNC_VSCFR;
1093 if (format_video_sync == VSYNC_CFR && copy_ts) {
1094 format_video_sync = VSYNC_VSCFR;
1097 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1101 format_video_sync != VSYNC_PASSTHROUGH &&
1102 format_video_sync != VSYNC_DROP) {
1103 if (delta0 < -0.6) {
1104 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1106 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1107 sync_ipts = ost->sync_opts;
1112 switch (format_video_sync) {
1114 if (ost->frame_number == 0 && delta0 >= 0.5) {
1115 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1118 ost->sync_opts = lrint(sync_ipts);
1121 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1122 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1124 } else if (delta < -1.1)
1126 else if (delta > 1.1) {
1127 nb_frames = lrintf(delta);
1129 nb0_frames = lrintf(delta0 - 0.6);
1135 else if (delta > 0.6)
1136 ost->sync_opts = lrint(sync_ipts);
1139 case VSYNC_PASSTHROUGH:
1140 ost->sync_opts = lrint(sync_ipts);
1147 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1148 nb0_frames = FFMIN(nb0_frames, nb_frames);
1150 memmove(ost->last_nb0_frames + 1,
1151 ost->last_nb0_frames,
1152 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1153 ost->last_nb0_frames[0] = nb0_frames;
1155 if (nb0_frames == 0 && ost->last_dropped) {
1157 av_log(NULL, AV_LOG_VERBOSE,
1158 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1159 ost->frame_number, ost->st->index, ost->last_frame->pts);
1161 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1162 if (nb_frames > dts_error_threshold * 30) {
1163 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1167 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1168 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1169 if (nb_frames_dup > dup_warning) {
1170 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1174 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1176 /* duplicates frame if needed */
1177 for (i = 0; i < nb_frames; i++) {
1178 AVFrame *in_picture;
1179 av_init_packet(&pkt);
1183 if (i < nb0_frames && ost->last_frame) {
1184 in_picture = ost->last_frame;
1186 in_picture = next_picture;
1191 in_picture->pts = ost->sync_opts;
1194 if (!check_recording_time(ost))
1196 if (ost->frame_number >= ost->max_frames)
1200 #if FF_API_LAVF_FMT_RAWPICTURE
1201 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1202 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1203 /* raw pictures are written as AVPicture structure to
1204 avoid any copies. We support temporarily the older
1206 if (in_picture->interlaced_frame)
1207 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1209 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1210 pkt.data = (uint8_t *)in_picture;
1211 pkt.size = sizeof(AVPicture);
1212 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1213 pkt.flags |= AV_PKT_FLAG_KEY;
1215 output_packet(of, &pkt, ost);
1219 int forced_keyframe = 0;
1222 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1223 ost->top_field_first >= 0)
1224 in_picture->top_field_first = !!ost->top_field_first;
1226 if (in_picture->interlaced_frame) {
1227 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1228 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1232 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1234 in_picture->quality = enc->global_quality;
1235 in_picture->pict_type = 0;
1237 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1238 in_picture->pts * av_q2d(enc->time_base) : NAN;
1239 if (ost->forced_kf_index < ost->forced_kf_count &&
1240 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1241 ost->forced_kf_index++;
1242 forced_keyframe = 1;
1243 } else if (ost->forced_keyframes_pexpr) {
1245 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1246 res = av_expr_eval(ost->forced_keyframes_pexpr,
1247 ost->forced_keyframes_expr_const_values, NULL);
1248 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1249 ost->forced_keyframes_expr_const_values[FKF_N],
1250 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1251 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1252 ost->forced_keyframes_expr_const_values[FKF_T],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1256 forced_keyframe = 1;
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1258 ost->forced_keyframes_expr_const_values[FKF_N];
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1260 ost->forced_keyframes_expr_const_values[FKF_T];
1261 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1264 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1265 } else if ( ost->forced_keyframes
1266 && !strncmp(ost->forced_keyframes, "source", 6)
1267 && in_picture->key_frame==1) {
1268 forced_keyframe = 1;
1271 if (forced_keyframe) {
1272 in_picture->pict_type = AV_PICTURE_TYPE_I;
1273 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276 update_benchmark(NULL);
1278 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1279 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1280 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1281 enc->time_base.num, enc->time_base.den);
1284 ost->frames_encoded++;
1286 ret = avcodec_send_frame(enc, in_picture);
1291 ret = avcodec_receive_packet(enc, &pkt);
1292 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1293 if (ret == AVERROR(EAGAIN))
1299 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1300 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1301 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1302 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1305 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1306 pkt.pts = ost->sync_opts;
1308 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1311 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1312 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1313 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1314 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1317 frame_size = pkt.size;
1318 output_packet(of, &pkt, ost);
1320 /* if two pass, output log */
1321 if (ost->logfile && enc->stats_out) {
1322 fprintf(ost->logfile, "%s", enc->stats_out);
1328 * For video, number of frames in == number of packets out.
1329 * But there may be reordering, so we can't throw away frames on encoder
1330 * flush, we need to limit them here, before they go into encoder.
1332 ost->frame_number++;
1334 if (vstats_filename && frame_size)
1335 do_video_stats(ost, frame_size);
1338 if (!ost->last_frame)
1339 ost->last_frame = av_frame_alloc();
1340 av_frame_unref(ost->last_frame);
1341 if (next_picture && ost->last_frame)
1342 av_frame_ref(ost->last_frame, next_picture);
1344 av_frame_free(&ost->last_frame);
1348 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/*
 * Convert a normalized mean-squared error ratio into a PSNR figure in dB.
 * Callers pass error/scale already divided by the peak value squared
 * (see the 255.0*255.0 scaling at the call sites), so -10*log10(d)
 * directly yields the conventional PSNR.
 * NOTE(review): this listing is line-sampled; the function's braces are
 * among the elided lines.
 */
1352 static double psnr(double d)
1354 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics for @ost to the global
 * vstats file (-vstats / -vstats_file option).
 *
 * @param ost        output stream the just-encoded packet belongs to
 * @param frame_size size in bytes of the encoded packet for this frame
 *
 * NOTE(review): the listing is line-sampled; the lazy-open error path
 * after fopen() and several brace lines are elided here.
 */
1357 static void do_video_stats(OutputStream *ost, int frame_size)
1359 AVCodecContext *enc;
1361 double ti1, bitrate, avg_bitrate;
1363 /* this is executed just the first time do_video_stats is called */
1365 vstats_file = fopen(vstats_filename, "w");
1373 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1374 frame_number = ost->st->nb_frames;
/* vstats_version 1 keeps the historical column layout; version >= 2 also
 * prints the output file and stream indices. */
1375 if (vstats_version <= 1) {
1376 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1377 ost->quality / (float)FF_QP2LAMBDA);
1379 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1380 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR column only when the encoder was asked to compute error values. */
1383 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1384 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1386 fprintf(vstats_file,"f_size= %6d ", frame_size);
1387 /* compute pts value */
1388 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate assumes one frame per encoder time_base tick;
 * avg_bitrate is total bytes written so far over elapsed stream time. */
1392 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1393 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1394 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1395 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1396 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1400 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark @ost as fully finished (both encoder and muxer side).
 * If the containing output file is in shortest mode (condition elided in
 * this sampled listing — presumably of->shortest; verify against the full
 * source), all sibling streams of the same output file are finished too.
 */
1402 static void finish_output_stream(OutputStream *ost)
1404 OutputFile *of = output_files[ost->file_index];
1407 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1410 for (i = 0; i < of->ctx->nb_streams; i++)
1411 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1416 * Get and encode new output from any of the filtergraphs, without causing
1419 * @return 0 for success, <0 for severe errors
1421 static int reap_filters(int flush)
1423 AVFrame *filtered_frame = NULL;
1426 /* Reap all buffers present in the buffer sinks */
1427 for (i = 0; i < nb_output_streams; i++) {
1428 OutputStream *ost = output_streams[i];
1429 OutputFile *of = output_files[ost->file_index];
1430 AVFilterContext *filter;
1431 AVCodecContext *enc = ost->enc_ctx;
/* Skip streams whose filtergraph is not configured yet. */
1434 if (!ost->filter || !ost->filter->graph->graph)
1436 filter = ost->filter->filter;
/* Lazily initialize the output stream the first time its sink has data. */
1438 if (!ost->initialized) {
1439 char error[1024] = "";
1440 ret = init_output_stream(ost, error, sizeof(error));
1442 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1443 ost->file_index, ost->index, error);
/* Reuse a per-stream frame as the sink output buffer. */
1448 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1449 return AVERROR(ENOMEM);
1451 filtered_frame = ost->filtered_frame;
1454 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only drain frames already buffered in the sink, do not
 * pull new data through the graph. */
1455 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1456 AV_BUFFERSINK_FLAG_NO_REQUEST);
1458 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1459 av_log(NULL, AV_LOG_WARNING,
1460 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* On flush+EOF, push a NULL frame so do_video_out() can emit trailing
 * duplicated frames / flush fps conversion state. */
1461 } else if (flush && ret == AVERROR_EOF) {
1462 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1463 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1467 if (ost->finished) {
1468 av_frame_unref(filtered_frame);
/* Rescale the sink pts into encoder time base, keeping a higher-precision
 * double copy (float_pts) for the fps code in do_video_out(). */
1471 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1472 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1473 AVRational filter_tb = av_buffersink_get_time_base(filter);
1474 AVRational tb = enc->time_base;
/* Temporarily scale the time base up by extra_bits to retain
 * fractional precision in the integer rescale below. */
1475 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1477 tb.den <<= extra_bits;
1479 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1480 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1481 float_pts /= 1 << extra_bits;
1482 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1483 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1485 filtered_frame->pts =
1486 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1487 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1489 //if (ost->source_index >= 0)
1490 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1492 switch (av_buffersink_get_type(filter)) {
1493 case AVMEDIA_TYPE_VIDEO:
/* Propagate the filter chain's SAR unless the user forced an aspect. */
1494 if (!ost->frame_aspect_ratio.num)
1495 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1498 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1499 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501 enc->time_base.num, enc->time_base.den);
1504 do_video_out(of, ost, filtered_frame, float_pts);
1506 case AVMEDIA_TYPE_AUDIO:
/* Audio encoders without PARAM_CHANGE support cannot handle a channel
 * count differing from what they were opened with. */
1507 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1508 enc->channels != av_frame_get_channels(filtered_frame)) {
1509 av_log(NULL, AV_LOG_ERROR,
1510 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1513 do_audio_out(of, ost, filtered_frame);
1516 // TODO support subtitle filters
1520 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type encoded sizes, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet
 * and byte counts.
 *
 * @param total_size total number of bytes written to all outputs,
 *                   as reported by the muxer/avio layer
 */
1527 static void print_final_stats(int64_t total_size)
1529 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1530 uint64_t subtitle_size = 0;
1531 uint64_t data_size = 0;
/* percent < 0 means "overhead unknown" and prints as "unknown" below. */
1532 float percent = -1.0;
/* Accumulate encoded payload sizes by media type. */
1536 for (i = 0; i < nb_output_streams; i++) {
1537 OutputStream *ost = output_streams[i];
1538 switch (ost->enc_ctx->codec_type) {
1539 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1540 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1542 default: other_size += ost->data_size; break;
1544 extra_size += ost->enc_ctx->extradata_size;
1545 data_size += ost->data_size;
/* First-pass-only streams write no muxed data (elided code presumably
 * adjusts for this — verify against the full source). */
1546 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1547 != AV_CODEC_FLAG_PASS1)
/* Muxing overhead = container bytes beyond the raw encoded payload. */
1551 if (data_size && total_size>0 && total_size >= data_size)
1552 percent = 100.0 * (total_size - data_size) / data_size;
1554 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1555 video_size / 1024.0,
1556 audio_size / 1024.0,
1557 subtitle_size / 1024.0,
1558 other_size / 1024.0,
1559 extra_size / 1024.0);
1561 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563 av_log(NULL, AV_LOG_INFO, "unknown");
1564 av_log(NULL, AV_LOG_INFO, "\n");
1566 /* print verbose per-stream stats */
1567 for (i = 0; i < nb_input_files; i++) {
1568 InputFile *f = input_files[i];
/* Note: this local total_size shadows the parameter within this loop. */
1569 uint64_t total_packets = 0, total_size = 0;
1571 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1572 i, f->ctx->filename);
1574 for (j = 0; j < f->nb_streams; j++) {
1575 InputStream *ist = input_streams[f->ist_index + j];
1576 enum AVMediaType type = ist->dec_ctx->codec_type;
1578 total_size += ist->data_size;
1579 total_packets += ist->nb_packets;
1581 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1582 i, j, media_type_string(type));
1583 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1584 ist->nb_packets, ist->data_size);
1586 if (ist->decoding_needed) {
1587 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1588 ist->frames_decoded);
1589 if (type == AVMEDIA_TYPE_AUDIO)
1590 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1591 av_log(NULL, AV_LOG_VERBOSE, "; ");
1594 av_log(NULL, AV_LOG_VERBOSE, "\n");
1597 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1598 total_packets, total_size);
/* Same per-stream reporting, output side. */
1601 for (i = 0; i < nb_output_files; i++) {
1602 OutputFile *of = output_files[i];
1603 uint64_t total_packets = 0, total_size = 0;
1605 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1606 i, of->ctx->filename);
1608 for (j = 0; j < of->ctx->nb_streams; j++) {
1609 OutputStream *ost = output_streams[of->ost_index + j];
1610 enum AVMediaType type = ost->enc_ctx->codec_type;
1612 total_size += ost->data_size;
1613 total_packets += ost->packets_written;
1615 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1616 i, j, media_type_string(type));
1617 if (ost->encoding_needed) {
1618 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1619 ost->frames_encoded);
1620 if (type == AVMEDIA_TYPE_AUDIO)
1621 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1622 av_log(NULL, AV_LOG_VERBOSE, "; ");
1625 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1626 ost->packets_written, ost->data_size);
1628 av_log(NULL, AV_LOG_VERBOSE, "\n");
1631 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1632 total_packets, total_size);
/* Warn the user when nothing at all was produced. */
1634 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1635 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637 av_log(NULL, AV_LOG_WARNING, "\n");
1639 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Print (or refresh) the one-line progress report on stderr and, if
 * -progress was given, write machine-readable key=value pairs to
 * progress_avio. Called periodically and once more with
 * is_last_report=1 at the end of the run.
 *
 * @param is_last_report non-zero on the final call; ends the line with
 *                       '\n' instead of '\r' and triggers final stats
 * @param timer_start    wallclock time (us) when transcoding started
 * @param cur_time       current wallclock time (us)
 *
 * NOTE(review): this listing is line-sampled; several statements
 * (e.g. the buf declaration, vid/qp handling, time splitting into
 * hours/mins) are elided.
 */
1644 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1647 AVBPrint buf_script;
1649 AVFormatContext *oc;
1651 AVCodecContext *enc;
1652 int frame_number, vid, i;
1655 int64_t pts = INT64_MIN + 1;
/* static: rate-limits reports across calls. */
1656 static int64_t last_time = -1;
1657 static int qp_histogram[52];
1658 int hours, mins, secs, us;
1662 if (!print_stats && !is_last_report && !progress_avio)
/* Throttle to one report per 500ms unless this is the final one. */
1665 if (!is_last_report) {
1666 if (last_time == -1) {
1667 last_time = cur_time;
1670 if ((cur_time - last_time) < 500000)
1672 last_time = cur_time;
1675 t = (cur_time-timer_start) / 1000000.0;
1678 oc = output_files[0]->ctx;
1680 total_size = avio_size(oc->pb);
1681 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1682 total_size = avio_tell(oc->pb);
1686 av_bprint_init(&buf_script, 0, 1);
1687 for (i = 0; i < nb_output_streams; i++) {
1689 ost = output_streams[i];
1691 if (!ost->stream_copy)
1692 q = ost->quality / (float) FF_QP2LAMBDA;
/* vid tracks whether a video stream was already reported; only the
 * first video stream contributes frame/fps/q columns. */
1694 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1696 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1697 ost->file_index, ost->index, q);
1699 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1702 frame_number = ost->frame_number;
1703 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal place for small rates, 0 otherwise. */
1704 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1705 frame_number, fps < 9.95, fps, q);
1706 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1707 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1708 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1709 ost->file_index, ost->index, q);
/* "L" marks the last report line. */
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* qp histogram (-qphist): one hex digit per QP bucket. */
1715 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717 for (j = 0; j < 32; j++)
1718 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1721 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723 double error, error_sum = 0;
1724 double scale, scale_sum = 0;
1726 char type[3] = { 'Y','U','V' };
1727 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1728 for (j = 0; j < 3; j++) {
/* Final report uses the encoder's accumulated error over all frames;
 * periodic reports use the per-frame error of the last packet. */
1729 if (is_last_report) {
1730 error = enc->error[j];
1731 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733 error = ost->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0;
1740 p = psnr(error / scale);
1741 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1742 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1743 ost->file_index, ost->index, type[j] | 32, p);
1745 p = psnr(error_sum / scale_sum);
1746 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1747 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1748 ost->file_index, ost->index, p);
1752 /* compute min output value */
1753 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1754 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1755 ost->st->time_base, AV_TIME_BASE_Q));
1757 nb_frames_drop += ost->last_dropped;
/* Split |pts| into h:m:s.us for display. */
1760 secs = FFABS(pts) / AV_TIME_BASE;
1761 us = FFABS(pts) % AV_TIME_BASE;
1767 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1768 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1770 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1772 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773 "size=%8.0fkB time=", total_size / 1024.0);
1775 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1777 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1778 (100 * us) / AV_TIME_BASE);
1781 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1782 av_bprintf(&buf_script, "bitrate=N/A\n");
1784 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1785 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1788 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1789 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1790 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1791 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1792 hours, mins, secs, us);
1794 if (nb_frames_dup || nb_frames_drop)
1795 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1796 nb_frames_dup, nb_frames_drop);
1797 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1798 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1801 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1802 av_bprintf(&buf_script, "speed=N/A\n");
1804 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1805 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1808 if (print_stats || is_last_report) {
/* '\r' keeps updating the same terminal line; '\n' finalizes it. */
1809 const char end = is_last_report ? '\n' : '\r';
1810 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1811 fprintf(stderr, "%s %c", buf, end);
1813 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1818 if (progress_avio) {
1819 av_bprintf(&buf_script, "progress=%s\n",
1820 is_last_report ? "end" : "continue");
1821 avio_write(progress_avio, buf_script.str,
1822 FFMIN(buf_script.len, buf_script.size - 1));
1823 avio_flush(progress_avio);
1824 av_bprint_finalize(&buf_script, NULL);
1825 if (is_last_report) {
1826 if ((ret = avio_closep(&progress_avio)) < 0)
1827 av_log(NULL, AV_LOG_ERROR,
1828 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1833 print_final_stats(total_size);
/*
 * Drain all encoders at end of input: send a NULL (flush) frame to each
 * encoder that was used and mux every remaining delayed packet. Streams
 * that were never initialized are configured with fallback parameters so
 * they can still be finalized cleanly.
 *
 * NOTE(review): this listing is line-sampled; the receive loop's exit
 * conditions and several error-handling lines are elided.
 */
1836 static void flush_encoders(void)
1840 for (i = 0; i < nb_output_streams; i++) {
1841 OutputStream *ost = output_streams[i];
1842 AVCodecContext *enc = ost->enc_ctx;
1843 OutputFile *of = output_files[ost->file_index];
1845 if (!ost->encoding_needed)
1848 // Try to enable encoding with no input frames.
1849 // Maybe we should just let encoding fail instead.
1850 if (!ost->initialized) {
1851 FilterGraph *fg = ost->filter->graph;
1852 char error[1024] = "";
1854 av_log(NULL, AV_LOG_WARNING,
1855 "Finishing stream %d:%d without any data written to it.\n",
1856 ost->file_index, ost->st->index);
1858 if (ost->filter && !fg->graph) {
1860 for (x = 0; x < fg->nb_inputs; x++) {
1861 InputFilter *ifilter = fg->inputs[x];
1862 if (ifilter->format < 0) {
1863 AVCodecParameters *par = ifilter->ist->st->codecpar;
1864 // We never got any input. Set a fake format, which will
1865 // come from libavformat.
1866 ifilter->format = par->format;
1867 ifilter->sample_rate = par->sample_rate;
1868 ifilter->channels = par->channels;
1869 ifilter->channel_layout = par->channel_layout;
1870 ifilter->width = par->width;
1871 ifilter->height = par->height;
1872 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1876 if (!ifilter_has_all_input_formats(fg))
1879 ret = configure_filtergraph(fg);
1881 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1885 finish_output_stream(ost);
1888 ret = init_output_stream(ost, error, sizeof(error));
1890 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891 ost->file_index, ost->index, error);
/* Raw (frame_size <= 1) audio encoders have no delayed packets. */
1896 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1898 #if FF_API_LAVF_FMT_RAWPICTURE
1899 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Only audio/video encoders support the send/receive flush protocol. */
1903 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* Enter drain mode. */
1906 avcodec_send_frame(enc, NULL);
1909 const char *desc = NULL;
1913 switch (enc->codec_type) {
1914 case AVMEDIA_TYPE_AUDIO:
1917 case AVMEDIA_TYPE_VIDEO:
1924 av_init_packet(&pkt);
1928 update_benchmark(NULL);
1929 ret = avcodec_receive_packet(enc, &pkt);
1930 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1931 if (ret < 0 && ret != AVERROR_EOF) {
1932 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Two-pass: append encoder pass-1 stats while draining. */
1937 if (ost->logfile && enc->stats_out) {
1938 fprintf(ost->logfile, "%s", enc->stats_out);
1940 if (ret == AVERROR_EOF) {
1943 if (ost->finished & MUXER_FINISHED) {
1944 av_packet_unref(&pkt);
1947 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1948 pkt_size = pkt.size;
1949 output_packet(of, &pkt, ost);
1950 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1951 do_video_stats(ost, pkt_size);
1958 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns non-zero when the packet may be written now (elided lines
 * presumably hold the remaining conditions and the return statements —
 * verify against the full source).
 */
1960 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1962 OutputFile *of = output_files[ost->file_index];
1963 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* Packet must come from the stream ost was mapped to. */
1965 if (ost->source_index != ist_index)
/* Drop packets before the requested output start time (-ss on output). */
1971 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Forward a packet from input stream @ist to output stream @ost without
 * re-encoding (-c copy): rescale timestamps into the muxer time base,
 * honor start/recording time limits, optionally run the parser-based
 * bitstream fixups, and hand the packet to output_packet().
 *
 * NOTE(review): this listing is line-sampled; the opkt declaration and
 * several early-return lines are elided.
 */
1977 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1979 OutputFile *of = output_files[ost->file_index];
1980 InputFile *f = input_files [ist->file_index];
1981 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1982 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1986 av_init_packet(&opkt);
/* By default the copied stream must start on a keyframe. */
1988 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1989 !ost->copy_initial_nonkeyframes)
/* Drop packets before the output start point unless -copypriorss. */
1992 if (!ost->frame_number && !ost->copy_prior_start) {
1993 int64_t comp_start = start_time;
1994 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1995 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1996 if (pkt->pts == AV_NOPTS_VALUE ?
1997 ist->pts < comp_start :
1998 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* End the stream once -t / recording_time on the output is reached. */
2002 if (of->recording_time != INT64_MAX &&
2003 ist->pts >= of->recording_time + start_time) {
2004 close_output_stream(ost);
/* Likewise for a per-input-file recording limit. */
2008 if (f->recording_time != INT64_MAX) {
2009 start_time = f->ctx->start_time;
2010 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2011 start_time += f->start_time;
2012 if (ist->pts >= f->recording_time + start_time) {
2013 close_output_stream(ost);
2018 /* force the input stream PTS */
2019 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* Rescale pts/dts from the input stream time base into the muxer time
 * base, shifted so the output starts at zero. */
2022 if (pkt->pts != AV_NOPTS_VALUE)
2023 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2025 opkt.pts = AV_NOPTS_VALUE;
2027 if (pkt->dts == AV_NOPTS_VALUE)
2028 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2030 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2031 opkt.dts -= ost_tb_start_time;
/* Audio: derive an accurate dts from the sample count to avoid drift
 * with coarse container time bases. */
2033 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2034 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2036 duration = ist->dec_ctx->frame_size;
2037 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2038 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2039 ost->mux_timebase) - ost_tb_start_time;
2042 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2044 opkt.flags = pkt->flags;
2045 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2046 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2047 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2048 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2049 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2051 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2052 &opkt.data, &opkt.size,
2053 pkt->data, pkt->size,
2054 pkt->flags & AV_PKT_FLAG_KEY);
2056 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Parser may have allocated a new buffer; give opkt ownership of it. */
2061 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2066 opkt.data = pkt->data;
2067 opkt.size = pkt->size;
2069 av_copy_packet_side_data(&opkt, pkt);
2071 #if FF_API_LAVF_FMT_RAWPICTURE
/* Deprecated rawpicture path: the muxer expects an AVPicture struct
 * instead of raw pixel data. */
2072 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2073 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2074 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2075 /* store AVPicture in AVPacket, as expected by the output format */
2076 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2078 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2082 opkt.data = (uint8_t *)&pict;
2083 opkt.size = sizeof(AVPicture);
2084 opkt.flags |= AV_PKT_FLAG_KEY;
2088 output_packet(of, &opkt, ost);
/*
 * If the decoder reported no channel layout, pick the default layout for
 * its channel count (capped by -guess_layout_max) and log the guess.
 * Returns non-zero on success, 0 when no layout could be determined
 * (return statements are among the lines elided from this listing).
 */
2091 int guess_input_channel_layout(InputStream *ist)
2093 AVCodecContext *dec = ist->dec_ctx;
2095 if (!dec->channel_layout) {
2096 char layout_name[256];
/* User may forbid guessing above a channel-count threshold. */
2098 if (dec->channels > ist->guess_layout_max)
2100 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2101 if (!dec->channel_layout)
2103 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2104 dec->channels, dec->channel_layout);
2105 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2106 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Track decode success/failure statistics and enforce -xerror:
 * with exit_on_error set, abort on decode errors and on frames the
 * decoder flagged as corrupt.
 */
2111 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
2113 if (*got_output || ret<0)
2114 decode_error_stat[ret<0] ++;
2116 if (ret < 0 && exit_on_error)
2119 if (exit_on_error && *got_output && ist) {
2120 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2121 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2127 // Filters can be configured only if the formats of all inputs are known.
/* Returns 0 if any audio/video input of @fg still has an unknown format,
 * non-zero otherwise (the return lines are elided from this listing). */
2128 static int ifilter_has_all_input_formats(FilterGraph *fg)
2131 for (i = 0; i < fg->nb_inputs; i++) {
2132 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2133 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Push a decoded frame into filtergraph input @ifilter. If the frame's
 * parameters differ from what the graph was configured with (or the graph
 * is not configured yet), the graph is reinitialized; frames arriving
 * before all inputs' formats are known are queued instead.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
2139 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2141 FilterGraph *fg = ifilter->graph;
2142 int need_reinit, ret, i;
2144 /* determine if the parameters for this input changed */
2145 need_reinit = ifilter->format != frame->format;
/* A change in hw frames context also forces a reinit. */
2146 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2147 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2150 switch (ifilter->ist->st->codecpar->codec_type) {
2151 case AVMEDIA_TYPE_AUDIO:
2152 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2153 ifilter->channels != frame->channels ||
2154 ifilter->channel_layout != frame->channel_layout;
2156 case AVMEDIA_TYPE_VIDEO:
2157 need_reinit |= ifilter->width != frame->width ||
2158 ifilter->height != frame->height;
/* Remember the new parameters for the upcoming (re)configuration. */
2163 ret = ifilter_parameters_from_frame(ifilter, frame);
2168 /* (re)init the graph if possible, otherwise buffer the frame and return */
2169 if (need_reinit || !fg->graph) {
2170 for (i = 0; i < fg->nb_inputs; i++) {
2171 if (!ifilter_has_all_input_formats(fg)) {
/* Not all inputs known yet: park a clone of the frame in this
 * input's FIFO until the graph can be configured. */
2172 AVFrame *tmp = av_frame_clone(frame);
2174 return AVERROR(ENOMEM);
2175 av_frame_unref(frame);
2177 if (!av_fifo_space(ifilter->frame_queue)) {
2178 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2182 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* Drain the old graph's buffered output before tearing it down. */
2187 ret = reap_filters(1);
2188 if (ret < 0 && ret != AVERROR_EOF) {
2190 av_strerror(ret, errbuf, sizeof(errbuf));
2192 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2196 ret = configure_filtergraph(fg);
2198 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2203 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/*
 * Signal end-of-stream on filtergraph input @ifilter. If the graph was
 * never configured and every input has now reached EOF, the graph's
 * output streams are finished directly since no data will ever flow.
 *
 * @return 0 on success, a negative AVERROR code on failure (elided lines
 *         presumably set eof state and return — verify in full source)
 */
2212 static int ifilter_send_eof(InputFilter *ifilter)
2218 if (ifilter->filter) {
/* A NULL frame is the buffersrc EOF marker. */
2219 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2223 // the filtergraph was never configured
2224 FilterGraph *fg = ifilter->graph;
2225 for (i = 0; i < fg->nb_inputs; i++)
2226 if (!fg->inputs[i]->eof)
2228 if (i == fg->nb_inputs) {
2229 // All the input streams have finished without the filtergraph
2230 // ever being configured.
2231 // Mark the output streams as finished.
2232 for (j = 0; j < fg->nb_outputs; j++)
2233 finish_output_stream(fg->outputs[j]->ost);
2240 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2241 // There is the following difference: if you got a frame, you must call
2242 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2243 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin adapter mapping the old got_frame-style decode API onto the
 * avcodec_send_packet()/avcodec_receive_frame() pair. */
2244 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2251 ret = avcodec_send_packet(avctx, pkt);
2252 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2253 // decoded frames with avcodec_receive_frame() until done.
2254 if (ret < 0 && ret != AVERROR_EOF)
2258 ret = avcodec_receive_frame(avctx, frame);
2259 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by @ist.
 * All but the last filter receive a reference clone (ist->filter_frame);
 * the last consumes the decoded frame itself, avoiding one extra ref.
 *
 * @return 0 on success, a negative AVERROR code on the first failure
 */
2267 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2272 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2273 for (i = 0; i < ist->nb_filters; i++) {
2274 if (i < ist->nb_filters - 1) {
2275 f = ist->filter_frame;
2276 ret = av_frame_ref(f, decoded_frame);
2281 ret = ifilter_send_frame(ist->filters[i], f);
/* A filter input at EOF is not an error for the decode path. */
2282 if (ret == AVERROR_EOF)
2283 ret = 0; /* ignore */
2285 av_log(NULL, AV_LOG_ERROR,
2286 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet from @ist, fix up the frame's timestamp, update
 * per-stream decode statistics and predicted next pts/dts, and forward
 * the decoded frame to the stream's filtergraph inputs.
 *
 * @param ist        input stream to decode from
 * @param pkt        packet to decode, or NULL to pull delayed frames
 * @param got_output set non-zero when a frame was produced
 * @return the decode result, or the filter-send error if that failed
 *
 * NOTE(review): this listing is line-sampled; the signature's trailing
 * parameter(s) and some statements are elided.
 */
2293 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2296 AVFrame *decoded_frame;
2297 AVCodecContext *avctx = ist->dec_ctx;
2299 AVRational decoded_frame_tb;
/* Lazily allocate the per-stream frame buffers. */
2301 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2302 return AVERROR(ENOMEM);
2303 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2304 return AVERROR(ENOMEM);
2305 decoded_frame = ist->decoded_frame;
2307 update_benchmark(NULL);
2308 ret = decode(avctx, decoded_frame, got_output, pkt);
2309 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A decoder reporting a non-positive sample rate is broken data. */
2313 if (ret >= 0 && avctx->sample_rate <= 0) {
2314 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2315 ret = AVERROR_INVALIDDATA;
2318 if (ret != AVERROR_EOF)
2319 check_decode_result(ist, got_output, ret);
2321 if (!*got_output || ret < 0)
2324 ist->samples_decoded += decoded_frame->nb_samples;
2325 ist->frames_decoded++;
2328 /* increment next_dts to use for the case where the input stream does not
2329 have timestamps or there are multiple frames in the packet */
2330 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2332 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Pick the best available pts source and its time base: the frame's own
 * pts, else the packet pts, else the stream's running dts estimate. */
2336 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2337 decoded_frame_tb = ist->st->time_base;
2338 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2339 decoded_frame->pts = pkt->pts;
2340 decoded_frame_tb = ist->st->time_base;
2342 decoded_frame->pts = ist->dts;
2343 decoded_frame_tb = AV_TIME_BASE_Q;
/* Rescale into the 1/sample_rate time base with drift compensation. */
2345 if (decoded_frame->pts != AV_NOPTS_VALUE)
2346 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2347 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2348 (AVRational){1, avctx->sample_rate});
2349 ist->nb_samples = decoded_frame->nb_samples;
2350 err = send_frame_to_filters(ist, decoded_frame);
2352 av_frame_unref(ist->filter_frame);
2353 av_frame_unref(decoded_frame);
2354 return err < 0 ? err : ret;
/*
 * Decode one video packet (or flush the decoder when pkt is NULL / eof set)
 * and forward any produced frame to the filtergraphs attached to ist.
 * Returns the decoder result, or a filter-feed error if that failed after
 * a successful decode (err < 0 wins over ret on return).
 * NOTE(review): this extract omits some original lines (the fused line
 * numbers jump); comments below describe only the visible code.
 */
2357 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2360     AVFrame *decoded_frame;
2361     int i, ret = 0, err = 0;
2362     int64_t best_effort_timestamp;
2363     int64_t dts = AV_NOPTS_VALUE;
2366     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2367     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2369     if (!eof && pkt && pkt->size == 0)
     /* lazily allocate the reusable decode/filter frames owned by ist */
2372     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2373         return AVERROR(ENOMEM);
2374     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2375         return AVERROR(ENOMEM);
2376     decoded_frame = ist->decoded_frame;
     /* ist->dts is kept in AV_TIME_BASE units; convert to the stream timebase */
2377     if (ist->dts != AV_NOPTS_VALUE)
2378         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2381         avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2384     // The old code used to set dts on the drain packet, which does not work
2385     // with the new API anymore.
     /* queue the dts so a best-effort timestamp can be synthesized at EOF */
2387         void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2389             return AVERROR(ENOMEM);
2390         ist->dts_buffer = new;
2391         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2394     update_benchmark(NULL);
2395     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2396     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2400     // The following line may be required in some cases where there is no parser
2401     // or the parser does not has_b_frames correctly
2402     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2403         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2404             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2406             av_log(ist->dec_ctx, AV_LOG_WARNING,
2407                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2408                    "If you want to help, upload a sample "
2409                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2410                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2411                    ist->dec_ctx->has_b_frames,
2412                    ist->st->codecpar->video_delay);
2415     if (ret != AVERROR_EOF)
2416         check_decode_result(ist, got_output, ret);
     /* diagnostic only: decoder context and frame disagreeing on geometry/format */
2418     if (*got_output && ret >= 0) {
2419         if (ist->dec_ctx->width != decoded_frame->width ||
2420             ist->dec_ctx->height != decoded_frame->height ||
2421             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2422             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2423                 decoded_frame->width,
2424                 decoded_frame->height,
2425                 decoded_frame->format,
2426                 ist->dec_ctx->width,
2427                 ist->dec_ctx->height,
2428                 ist->dec_ctx->pix_fmt);
2432     if (!*got_output || ret < 0)
     /* honor user-forced field order (-top) when specified */
2435     if(ist->top_field_first>=0)
2436         decoded_frame->top_field_first = ist->top_field_first;
2438     ist->frames_decoded++;
     /* download hwaccel surfaces to system memory before filtering */
2440     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2441         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2445     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2447     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
     /* with a forced input framerate (-r before -i), synthesize CFR timestamps */
2449     if (ist->framerate.num)
2450         best_effort_timestamp = ist->cfr_next_pts++;
     /* at EOF, fall back to the oldest queued packet dts (shift queue left) */
2452     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2453         best_effort_timestamp = ist->dts_buffer[0];
2455         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2456             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2457         ist->nb_dts_buffer--;
2460     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2461         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2463         if (ts != AV_NOPTS_VALUE)
2464             ist->next_pts = ist->pts = ts;
2468         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2469                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2470                ist->st->index, av_ts2str(decoded_frame->pts),
2471                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2472                best_effort_timestamp,
2473                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2474                decoded_frame->key_frame, decoded_frame->pict_type,
2475                ist->st->time_base.num, ist->st->time_base.den);
     /* container-level SAR overrides the codec-level one */
2478     if (ist->st->sample_aspect_ratio.num)
2479         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2481     err = send_frame_to_filters(ist, decoded_frame);
2484     av_frame_unref(ist->filter_frame);
2485     av_frame_unref(decoded_frame);
2486     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet, optionally fix overlapping durations
 * (-fix_sub_duration), feed sub2video filtergraphs, and send the decoded
 * AVSubtitle to every subtitle-encoding output stream.
 * NOTE(review): this extract omits some original lines (fused line numbers
 * jump); comments cover only the visible code.
 */
2489 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2492     AVSubtitle subtitle;
2494     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2495                                           &subtitle, got_output, pkt);
2497     check_decode_result(NULL, got_output, ret);
2499     if (ret < 0 || !*got_output) {
2502             sub2video_flush(ist);
     /* clamp the previous subtitle's display time so it cannot overlap this one */
2506     if (ist->fix_sub_duration) {
2508         if (ist->prev_sub.got_output) {
2509             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2510                              1000, AV_TIME_BASE);
2511             if (end < ist->prev_sub.subtitle.end_display_time) {
2512                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2513                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
2514                        ist->prev_sub.subtitle.end_display_time, end,
2515                        end <= 0 ? ", dropping it" : "");
2516                 ist->prev_sub.subtitle.end_display_time = end;
         /* emit the previous subtitle now, keep the current one for next call */
2519         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2520         FFSWAP(int,        ret,         ist->prev_sub.ret);
2521         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
     /* sub2video: render into a frame if ready, else queue until filters exist */
2529     if (ist->sub2video.frame) {
2530         sub2video_update(ist, &subtitle);
2531     } else if (ist->nb_filters) {
2532         if (!ist->sub2video.sub_queue)
2533             ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2534         if (!ist->sub2video.sub_queue)
2536         if (!av_fifo_space(ist->sub2video.sub_queue)) {
2537             ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2541         av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2545     if (!subtitle.num_rects)
2548     ist->frames_decoded++;
     /* hand the subtitle to every eligible subtitle encoder */
2550     for (i = 0; i < nb_output_streams; i++) {
2551         OutputStream *ost = output_streams[i];
2553         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2554             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2557         do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2562     avsubtitle_free(&subtitle);
/*
 * Signal EOF to every filtergraph input fed by this input stream.
 * NOTE(review): extract omits lines here (declarations of i/ret, error
 * handling and the return) — only the visible loop is documented.
 */
2566 static int send_filter_eof(InputStream *ist)
2569     for (i = 0; i < ist->nb_filters; i++) {
2570         ret = ifilter_send_eof(ist->filters[i]);
2577 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Core per-packet driver: maintains the input stream's dts/pts estimates,
 * dispatches to the per-media-type decode helpers when decoding is needed,
 * sends filter EOF after a flush, and advances timestamps / forwards packets
 * for stream copy. Returns 0 once EOF has been fully processed, non-zero
 * while more input is expected.
 * NOTE(review): this extract omits some original lines (fused line numbers
 * jump); comments describe only the visible code.
 */
2578 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2582     int eof_reached = 0;
     /* seed ist->dts/pts from the first packet; pre-roll by has_b_frames frames */
2585     if (!ist->saw_first_ts) {
2586         ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2588         if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2589             ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2590             ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2592         ist->saw_first_ts = 1;
2595     if (ist->next_dts == AV_NOPTS_VALUE)
2596         ist->next_dts = ist->dts;
2597     if (ist->next_pts == AV_NOPTS_VALUE)
2598         ist->next_pts = ist->pts;
2602     av_init_packet(&avpkt);
     /* resync the running dts to the packet's dts (in AV_TIME_BASE units) */
2609     if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2610         ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2611         if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2612             ist->next_pts = ist->pts = ist->dts;
2615     // while we have more to decode or while the decoder did output something on EOF
2616     while (ist->decoding_needed) {
2619         int decode_failed = 0;
2621         ist->pts = ist->next_pts;
2622         ist->dts = ist->next_dts;
     /* the packet is sent only on the first iteration ("repeating" drains) */
2624         switch (ist->dec_ctx->codec_type) {
2625         case AVMEDIA_TYPE_AUDIO:
2626             ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
2629         case AVMEDIA_TYPE_VIDEO:
2630             ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
         /* estimate the frame duration: packet duration, else codec framerate */
2632             if (!repeating || !pkt || got_output) {
2633                 if (pkt && pkt->duration) {
2634                     duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2635                 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2636                     int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2637                     duration = ((int64_t)AV_TIME_BASE *
2638                                     ist->dec_ctx->framerate.den * ticks) /
2639                                     ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2642             if(ist->dts != AV_NOPTS_VALUE && duration) {
2643                 ist->next_dts += duration;
2645                 ist->next_dts = AV_NOPTS_VALUE;
2649                 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2651         case AVMEDIA_TYPE_SUBTITLE:
2654             ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2655             if (!pkt && ret >= 0)
2662         if (ret == AVERROR_EOF) {
2668             if (decode_failed) {
2669                 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2670                        ist->file_index, ist->st->index, av_err2str(ret));
2672                 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2673                        "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2675             if (!decode_failed || exit_on_error)
2681         ist->got_output = 1;
2686         // During draining, we might get multiple output frames in this loop.
2687         // ffmpeg.c does not drain the filter chain on configuration changes,
2688         // which means if we send multiple frames at once to the filters, and
2689         // one of those frames changes configuration, the buffered frames will
2690         // be lost. This can upset certain FATE tests.
2691         // Decode only 1 frame per call on EOF to appease these FATE tests.
2692         // The ideal solution would be to rewrite decoding to use the new
2693         // decoding API in a better way.
2700     /* after flushing, send an EOF on all the filter inputs attached to the stream */
2701     /* except when looping we need to flush but not to send an EOF */
2702     if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2703         int ret = send_filter_eof(ist);
2705             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2710     /* handle stream copy */
2711     if (!ist->decoding_needed) {
2712         ist->dts = ist->next_dts;
2713         switch (ist->dec_ctx->codec_type) {
2714         case AVMEDIA_TYPE_AUDIO:
2715             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716                              ist->dec_ctx->sample_rate;
2718         case AVMEDIA_TYPE_VIDEO:
2719             if (ist->framerate.num) {
2720                 // TODO: Remove work-around for c99-to-c89 issue 7
2721                 AVRational time_base_q = AV_TIME_BASE_Q;
2722                 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2723                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2724             } else if (pkt->duration) {
2725                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2726             } else if(ist->dec_ctx->framerate.num != 0) {
2727                 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2728                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2729                                   ist->dec_ctx->framerate.den * ticks) /
2730                                   ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2734         ist->pts = ist->dts;
2735         ist->next_pts = ist->next_dts;
     /* forward the packet to every stream-copy output it qualifies for */
2737     for (i = 0; pkt && i < nb_output_streams; i++) {
2738         OutputStream *ost = output_streams[i];
2740         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2743         do_streamcopy(ist, ost, pkt);
2746     return !eof_reached;
/*
 * Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Only runs once every output file
 * header has been written (returns early otherwise).
 * NOTE(review): extract omits some lines (allocation checks, frees, braces).
 */
2749 static void print_sdp(void)
2754     AVIOContext *sdp_pb;
2755     AVFormatContext **avc;
     /* wait until all muxers have written their headers */
2757     for (i = 0; i < nb_output_files; i++) {
2758         if (!output_files[i]->header_written)
2762     avc = av_malloc_array(nb_output_files, sizeof(*avc));
     /* collect only the RTP muxer contexts; j counts them */
2765     for (i = 0, j = 0; i < nb_output_files; i++) {
2766         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2767             avc[j] = output_files[i]->ctx;
2775     av_sdp_create(avc, j, sdp, sizeof(sdp));
2777     if (!sdp_filename) {
2778         printf("SDP:\n%s\n", sdp);
2781         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2782             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2784             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2785             avio_closep(&sdp_pb);
2786             av_freep(&sdp_filename);
/*
 * Look up the hwaccel table entry matching a hardware pixel format.
 * NOTE(review): extract omits lines (the 'int i;' declaration and the
 * terminating 'return NULL;' for no match — confirm against full source).
 */
2794 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2797     for (i = 0; hwaccels[i].name; i++)
2798         if (hwaccels[i].pix_fmt == pix_fmt)
2799             return &hwaccels[i];
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first acceptable hwaccel format (initializing it),
 * recording the choice on the InputStream. Falls through candidates that are
 * not hwaccel formats or do not match the user-requested hwaccel.
 * NOTE(review): extract omits lines (loop exits, the software fallback path).
 */
2803 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2805     InputStream *ist = s->opaque;
2806     const enum AVPixelFormat *p;
     /* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
2809     for (p = pix_fmts; *p != -1; p++) {
2810         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2811         const HWAccel *hwaccel;
2813         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2816         hwaccel = get_hwaccel(*p);
     /* skip hwaccels that conflict with an active or explicitly requested one */
2818             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2819             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2822         ret = hwaccel->init(s);
     /* init failure is fatal only if this hwaccel was explicitly requested */
2824             if (ist->hwaccel_id == hwaccel->id) {
2825                 av_log(NULL, AV_LOG_FATAL,
2826                        "%s hwaccel requested for input stream #%d:%d, "
2827                        "but cannot be initialized.\n", hwaccel->name,
2828                        ist->file_index, ist->st->index);
2829                 return AV_PIX_FMT_NONE;
2834         if (ist->hw_frames_ctx) {
2835             s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2836             if (!s->hw_frames_ctx)
2837                 return AV_PIX_FMT_NONE;
     /* remember the accepted hwaccel and its pixel format for later frames */
2840         ist->active_hwaccel_id = hwaccel->id;
2841         ist->hwaccel_pix_fmt   = *p;
/*
 * AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hardware frames, otherwise use libavcodec's default.
 */
2848 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2850     InputStream *ist = s->opaque;
2852     if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2853         return ist->hwaccel_get_buffer(s, frame, flags);
2855     return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (if decoding is needed): install the
 * get_format/get_buffer2 callbacks, set decoder options, and open the codec.
 * On failure fills 'error' (error_len bytes) and returns a negative AVERROR.
 * NOTE(review): extract omits some lines (fused line numbers jump).
 */
2858 static int init_input_stream(int ist_index, char *error, int error_len)
2861     InputStream *ist = input_streams[ist_index];
2863     if (ist->decoding_needed) {
2864         AVCodec *codec = ist->dec;
2866             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2867                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2868             return AVERROR(EINVAL);
     /* opaque lets the callbacks below recover the InputStream from the ctx */
2871         ist->dec_ctx->opaque                = ist;
2872         ist->dec_ctx->get_format            = get_format;
2873         ist->dec_ctx->get_buffer2           = get_buffer;
2874         ist->dec_ctx->thread_safe_callbacks = 1;
2876         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2877         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2878            (ist->decoding_needed & DECODING_FOR_OST)) {
2879             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2880             if (ist->decoding_needed & DECODING_FOR_FILTER)
2881                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2884         av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2886         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2887          * audio, and video decoders such as cuvid or mediacodec */
2888         av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
     /* default to automatic thread count unless the user set one */
2890         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2891             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2892         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2893             if (ret == AVERROR_EXPERIMENTAL)
2894                 abort_codec_experimental(codec, 0);
2896             snprintf(error, error_len,
2897                      "Error while opening decoder for input stream "
2899                     ist->file_index, ist->st->index, av_err2str(ret));
     /* any decoder option left in the dict was not consumed -> report it */
2902         assert_avoptions(ist->decoder_opts);
2905     ist->next_pts = AV_NOPTS_VALUE;
2906     ist->next_dts = AV_NOPTS_VALUE;
/*
 * Return the InputStream an output stream maps from, if any.
 * NOTE(review): extract omits the no-source fallback (likely 'return NULL;'
 * — confirm against full source).
 */
2911 static InputStream *get_input_stream(OutputStream *ost)
2913     if (ost->source_index >= 0)
2914         return input_streams[ost->source_index];
/**
 * qsort-style comparator ordering two int64_t values ascending.
 *
 * @param a pointer to the first int64_t
 * @param b pointer to the second int64_t
 * @return -1 if *a < *b, 0 if equal, 1 if *a > *b
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    /* overflow-safe three-way comparison (equivalent to FFDIFFSIGN) */
    return (lhs > rhs) - (lhs < rhs);
}
2923 /* open the muxer when all the streams are initialized */
/*
 * Once every stream of the output file is initialized: write the muxer
 * header, optionally emit the SDP, and flush packets queued while the muxer
 * was not yet open.
 * NOTE(review): extract omits some lines (early returns, braces).
 */
2924 static int check_init_output_file(OutputFile *of, int file_index)
     /* bail out until all streams belonging to this file are ready */
2928     for (i = 0; i < of->ctx->nb_streams; i++) {
2929         OutputStream *ost = output_streams[of->ost_index + i];
2930         if (!ost->initialized)
2934     of->ctx->interrupt_callback = int_cb;
2936     ret = avformat_write_header(of->ctx, &of->opts);
2938         av_log(NULL, AV_LOG_ERROR,
2939                "Could not write header for output file #%d "
2940                "(incorrect codec parameters ?): %s\n",
2941                file_index, av_err2str(ret));
2944     //assert_avoptions(of->opts);
2945     of->header_written = 1;
2947     av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2949     if (sdp_filename || want_sdp)
2952     /* flush the muxing queues */
2953     for (i = 0; i < of->ctx->nb_streams; i++) {
2954         OutputStream *ost = output_streams[of->ost_index + i];
2956         /* try to improve muxing time_base (only possible if nothing has been written yet) */
2957         if (!av_fifo_size(ost->muxing_queue))
2958             ost->mux_timebase = ost->st->time_base;
     /* drain packets buffered before the header could be written */
2960         while (av_fifo_size(ost->muxing_queue)) {
2962             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2963             write_packet(of, &pkt, ost);
/*
 * Initialize the chain of bitstream filters on an output stream, threading
 * codec parameters and timebases from one filter to the next, then copy the
 * final output parameters/timebase back onto the stream.
 * NOTE(review): extract omits some lines (error returns, braces).
 */
2970 static int init_output_bsfs(OutputStream *ost)
2975     if (!ost->nb_bitstream_filters)
2978     for (i = 0; i < ost->nb_bitstream_filters; i++) {
2979         ctx = ost->bsf_ctx[i];
     /* filter i's input is the previous filter's output (or the stream itself) */
2981         ret = avcodec_parameters_copy(ctx->par_in,
2982                                       i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2986         ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2988         ret = av_bsf_init(ctx);
2990             av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2991                    ost->bsf_ctx[i]->filter->name);
     /* the last filter in the chain defines what the muxer sees */
2996     ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2997     ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3001     ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy mode: copy codec parameters
 * from the input, resolve the codec tag for the target container, transfer
 * timing info, duplicate side data, and apply per-codec copy fixups.
 * NOTE(review): extract omits some lines (error checks, braces); comments
 * cover only the visible code.
 */
3006 static int init_output_stream_streamcopy(OutputStream *ost)
3008     OutputFile *of = output_files[ost->file_index];
3009     InputStream *ist = get_input_stream(ost);
3010     AVCodecParameters *par_dst = ost->st->codecpar;
3011     AVCodecParameters *par_src = ost->ref_par;
3014     uint32_t codec_tag = par_dst->codec_tag;
3016     av_assert0(ist && !ost->filter);
     /* route input params through enc_ctx so user codec options can apply */
3018     ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3020     ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3022         av_log(NULL, AV_LOG_FATAL,
3023                "Error setting up codec context options.\n");
3026     avcodec_parameters_from_context(par_src, ost->enc_ctx);
     /* keep the source codec_tag only if the target container accepts it */
3029         unsigned int codec_tag_tmp;
3030         if (!of->ctx->oformat->codec_tag ||
3031             av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3032             !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3033             codec_tag = par_src->codec_tag;
3036     ret = avcodec_parameters_copy(par_dst, par_src);
3040     par_dst->codec_tag = codec_tag;
3042     if (!ost->frame_rate.num)
3043         ost->frame_rate = ist->framerate;
3044     ost->st->avg_frame_rate = ost->frame_rate;
3046     ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3050     // copy timebase while removing common factors
3051     if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3052         ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3054     // copy estimated duration as a hint to the muxer
3055     if (ost->st->duration <= 0 && ist->st->duration > 0)
3056         ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3059     ost->st->disposition = ist->st->disposition;
     /* deep-copy all stream-level packet side data from the input stream */
3061     if (ist->st->nb_side_data) {
3062         ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3063                                               sizeof(*ist->st->side_data));
3064         if (!ost->st->side_data)
3065             return AVERROR(ENOMEM);
3067         ost->st->nb_side_data = 0;
3068         for (i = 0; i < ist->st->nb_side_data; i++) {
3069             const AVPacketSideData *sd_src = &ist->st->side_data[i];
3070             AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3072             sd_dst->data = av_malloc(sd_src->size);
3074                 return AVERROR(ENOMEM);
3075             memcpy(sd_dst->data, sd_src->data, sd_src->size);
3076             sd_dst->size = sd_src->size;
3077             sd_dst->type = sd_src->type;
3078             ost->st->nb_side_data++;
     /* user overrode rotation: replace it with a fresh display matrix */
3082     if (ost->rotate_overridden) {
3083         uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3084                                               sizeof(int32_t) * 9);
3086             av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3089     ost->parser = av_parser_init(par_dst->codec_id);
3090     ost->parser_avctx = avcodec_alloc_context3(NULL);
3091     if (!ost->parser_avctx)
3092         return AVERROR(ENOMEM);
3094     switch (par_dst->codec_type) {
3095     case AVMEDIA_TYPE_AUDIO:
3096         if (audio_volume != 256) {
3097             av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
         /* these block_align values are bogus for MP3/AC3; let the muxer decide */
3100         if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3101             par_dst->block_align= 0;
3102         if(par_dst->codec_id == AV_CODEC_ID_AC3)
3103             par_dst->block_align= 0;
3105     case AVMEDIA_TYPE_VIDEO:
3106         if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3108                 av_mul_q(ost->frame_aspect_ratio,
3109                          (AVRational){ par_dst->height, par_dst->width });
3110             av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3111                    "with stream copy may produce invalid files\n");
3113         else if (ist->st->sample_aspect_ratio.num)
3114             sar = ist->st->sample_aspect_ratio;
3116             sar = par_src->sample_aspect_ratio;
3117         ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3118         ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3119         ost->st->r_frame_rate = ist->st->r_frame_rate;
3123     ost->mux_timebase = ist->st->time_base;
/*
 * Set the stream's "encoder" metadata tag to identify libavcodec and the
 * encoder name, unless the user already provided one. In bitexact mode the
 * version string is replaced by a bare "Lavc" to keep output reproducible.
 * NOTE(review): extract omits some lines (early returns, braces).
 */
3128 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3130     AVDictionaryEntry *e;
3132     uint8_t *encoder_string;
3133     int encoder_string_len;
3134     int format_flags = 0;
3135     int codec_flags = 0;
     /* respect a user-supplied encoder tag */
3137     if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
     /* evaluate -fflags to learn whether bitexact mode is requested */
3140     e = av_dict_get(of->opts, "fflags", NULL, 0);
3142         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3145         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3147     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3149         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3152         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
     /* +2 covers the separating space and the terminating NUL */
3155     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3156     encoder_string     = av_mallocz(encoder_string_len);
3157     if (!encoder_string)
3160     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3161         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3163         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3164     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3165     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
3166                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames specification (a comma-separated list of
 * timestamps, where an entry may be "chapters[+offset]" to expand to all
 * chapter start times) into a sorted array of pts in the encoder timebase,
 * stored on ost->forced_kf_pts / ost->forced_kf_count.
 * NOTE(review): extract omits some lines (counting commas into n, advancing
 * p past each entry); comments cover only the visible code.
 */
3169 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3170                                     AVCodecContext *avctx)
3173     int n = 1, i, size, index = 0;
     /* first pass: count entries (commas) to size the pts array */
3176     for (p = kf; *p; p++)
3180     pts = av_malloc_array(size, sizeof(*pts));
3182         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3187     for (i = 0; i < n; i++) {
3188         char *next = strchr(p, ',');
     /* "chapters[+offset]": one keyframe per chapter start */
3193         if (!memcmp(p, "chapters", 8)) {
3195             AVFormatContext *avf = output_files[ost->file_index]->ctx;
     /* grow the array by the chapter count, guarding against int overflow */
3198             if (avf->nb_chapters > INT_MAX - size ||
3199                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3201                 av_log(NULL, AV_LOG_FATAL,
3202                        "Could not allocate forced key frames array.\n");
3205             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3206             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3208             for (j = 0; j < avf->nb_chapters; j++) {
3209                 AVChapter *c = avf->chapters[j];
3210                 av_assert1(index < size);
3211                 pts[index++] = av_rescale_q(c->start, c->time_base,
3212                                             avctx->time_base) + t;
     /* plain timestamp entry */
3217             t = parse_time_or_die("force_key_frames", p, 1);
3218             av_assert1(index < size);
3219             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3226     av_assert0(index == size);
     /* keep the list sorted so the encoder can consume it in order */
3227     qsort(pts, size, sizeof(*pts), compare_int64);
3228     ost->forced_kf_count = size;
3229     ost->forced_kf_pts   = pts;
/*
 * Configure the encoder context for an output stream that will be encoded:
 * derive frame rate, sample/pixel format, dimensions, SAR and timebase from
 * the attached filtergraph sink and the input stream, set disposition, and
 * parse forced-keyframe specifications.
 * NOTE(review): extract omits some lines (fused line numbers jump); comments
 * cover only the visible code.
 */
3232 static int init_output_stream_encode(OutputStream *ost)
3234     InputStream *ist = get_input_stream(ost);
3235     AVCodecContext *enc_ctx = ost->enc_ctx;
3236     AVCodecContext *dec_ctx = NULL;
3237     AVFormatContext *oc = output_files[ost->file_index]->ctx;
3240     set_encoder_id(output_files[ost->file_index], ost);
3242     // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3243     // hand, the legacy API makes demuxers set "rotate" metadata entries,
3244     // which have to be filtered out to prevent leaking them to output files.
3245     av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3248         ost->st->disposition = ist->st->disposition;
3250         dec_ctx = ist->dec_ctx;
3252         enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
     /* a lone audio/video stream of its type gets the DEFAULT disposition */
3254         for (j = 0; j < oc->nb_streams; j++) {
3255             AVStream *st = oc->streams[j];
3256             if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3259         if (j == oc->nb_streams)
3260             if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3261                 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3262                 ost->st->disposition = AV_DISPOSITION_DEFAULT;
     /* frame rate resolution order: user (-r), filter sink, input framerate,
      * input r_frame_rate, and finally a hardcoded 25 fps fallback */
3265     if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3266         if (!ost->frame_rate.num)
3267             ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3268         if (ist && !ost->frame_rate.num)
3269             ost->frame_rate = ist->framerate;
3270         if (ist && !ost->frame_rate.num)
3271             ost->frame_rate = ist->st->r_frame_rate;
3272         if (ist && !ost->frame_rate.num) {
3273             ost->frame_rate = (AVRational){25, 1};
3274             av_log(NULL, AV_LOG_WARNING,
3276                    "about the input framerate is available. Falling "
3277                    "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3278                    "if you want a different framerate.\n",
3279                    ost->file_index, ost->index);
3281 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
     /* snap to the nearest rate the encoder supports unless -force_fps */
3282         if (ost->enc->supported_framerates && !ost->force_fps) {
3283             int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3284             ost->frame_rate = ost->enc->supported_framerates[idx];
3286         // reduce frame rate for mpeg4 to be within the spec limits
3287         if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3288             av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3289                       ost->frame_rate.num, ost->frame_rate.den, 65535);
3293     switch (enc_ctx->codec_type) {
3294     case AVMEDIA_TYPE_AUDIO:
     /* audio format/rate/layout come from the filtergraph sink */
3295         enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
3297             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3298                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3299         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
3300         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3301         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
3302         enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3304     case AVMEDIA_TYPE_VIDEO:
3305         enc_ctx->time_base = av_inv_q(ost->frame_rate);
3306         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3307             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3308         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3309            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3310             av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3311                                        "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
     /* convert forced keyframe pts into the final encoder timebase */
3313         for (j = 0; j < ost->forced_kf_count; j++)
3314             ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3316                                                  enc_ctx->time_base);
3318         enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
3319         enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3320         enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3321             ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3322             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3323             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
     /* warn about surprising default pixel formats for common encoders */
3324         if (!strncmp(ost->enc->name, "libx264", 7) &&
3325             enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3326             av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3327             av_log(NULL, AV_LOG_WARNING,
3328                    "No pixel format specified, %s for H.264 encoding chosen.\n"
3329                    "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3330                    av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3331         if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3332             enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3333             av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3334             av_log(NULL, AV_LOG_WARNING,
3335                    "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3336                    "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3337                    av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3338         enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3340             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3341                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3343         enc_ctx->framerate = ost->frame_rate;
3345         ost->st->avg_frame_rate = ost->frame_rate;
     /* any geometry/format change invalidates the inherited raw sample depth */
3348             enc_ctx->width   != dec_ctx->width  ||
3349             enc_ctx->height  != dec_ctx->height ||
3350             enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3351             enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
     /* -force_key_frames: "expr:<expr>" is evaluated per frame; "source" keeps
      * input keyframes; anything else is a static timestamp list */
3354         if (ost->forced_keyframes) {
3355             if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3356                 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3357                                     forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3359                     av_log(NULL, AV_LOG_ERROR,
3360                            "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3363                 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3364                 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3365                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3366                 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3368             // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3369             // parse it only for static kf timings
3370             } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3371                 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3375     case AVMEDIA_TYPE_SUBTITLE:
3376         enc_ctx->time_base = AV_TIME_BASE_Q;
3377         if (!enc_ctx->width) {
3378             enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
3379             enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
3382     case AVMEDIA_TYPE_DATA:
3389     ost->mux_timebase = enc_ctx->time_base;
3394 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3398 if (ost->encoding_needed) {
3399 AVCodec *codec = ost->enc;
3400 AVCodecContext *dec = NULL;
3403 ret = init_output_stream_encode(ost);
3407 if ((ist = get_input_stream(ost)))
3409 if (dec && dec->subtitle_header) {
3410 /* ASS code assumes this buffer is null terminated so add extra byte. */
3411 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3412 if (!ost->enc_ctx->subtitle_header)
3413 return AVERROR(ENOMEM);
3414 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3415 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3417 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3418 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3419 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3421 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3422 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3423 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3425 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3426 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3427 av_buffersink_get_format(ost->filter->filter)) {
3428 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3429 if (!ost->enc_ctx->hw_frames_ctx)
3430 return AVERROR(ENOMEM);
3433 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3434 if (ret == AVERROR_EXPERIMENTAL)
3435 abort_codec_experimental(codec, 1);
3436 snprintf(error, error_len,
3437 "Error while opening encoder for output stream #%d:%d - "
3438 "maybe incorrect parameters such as bit_rate, rate, width or height",
3439 ost->file_index, ost->index);
3442 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3443 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3444 av_buffersink_set_frame_size(ost->filter->filter,
3445 ost->enc_ctx->frame_size);
3446 assert_avoptions(ost->encoder_opts);
3447 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3448 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3449 " It takes bits/s as argument, not kbits/s\n");
3451 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3453 av_log(NULL, AV_LOG_FATAL,
3454 "Error initializing the output stream codec context.\n");
3458 * FIXME: ost->st->codec should't be needed here anymore.
3460 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3464 if (ost->enc_ctx->nb_coded_side_data) {
3467 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3468 sizeof(*ost->st->side_data));
3469 if (!ost->st->side_data)
3470 return AVERROR(ENOMEM);
3472 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3473 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3474 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3476 sd_dst->data = av_malloc(sd_src->size);
3478 return AVERROR(ENOMEM);
3479 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3480 sd_dst->size = sd_src->size;
3481 sd_dst->type = sd_src->type;
3482 ost->st->nb_side_data++;
3487 * Add global input side data. For now this is naive, and copies it
3488 * from the input stream's global side data. All side data should
3489 * really be funneled over AVFrame and libavfilter, then added back to
3490 * packet side data, and then potentially using the first packet for
3495 for (i = 0; i < ist->st->nb_side_data; i++) {
3496 AVPacketSideData *sd = &ist->st->side_data[i];
3497 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3499 return AVERROR(ENOMEM);
3500 memcpy(dst, sd->data, sd->size);
3501 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3502 av_display_rotation_set((uint32_t *)dst, 0);
3506 // copy timebase while removing common factors
3507 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3508 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3510 // copy estimated duration as a hint to the muxer
3511 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3512 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3514 ost->st->codec->codec= ost->enc_ctx->codec;
3515 } else if (ost->stream_copy) {
3516 ret = init_output_stream_streamcopy(ost);
3521 * FIXME: will the codec context used by the parser during streamcopy
3522 * This should go away with the new parser API.
3524 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3529 // parse user provided disposition, and update stream values
3530 if (ost->disposition) {
3531 static const AVOption opts[] = {
3532 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3533 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3534 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3535 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3536 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3537 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3538 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3539 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3540 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3541 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3542 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3543 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3544 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3545 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3548 static const AVClass class = {
3550 .item_name = av_default_item_name,
3552 .version = LIBAVUTIL_VERSION_INT,
3554 const AVClass *pclass = &class;
3556 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3561 /* initialize bitstream filters for the output stream
3562 * needs to be done here, because the codec id for streamcopy is not
3563 * known until now */
3564 ret = init_output_bsfs(ost);
3568 ost->initialized = 1;
3570 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (once per stream index) that a packet arrived for a stream that
 * appeared after the input was opened; such dynamically-added streams are
 * ignored by the caller (see process_input()).
 * NOTE(review): this excerpt appears to be missing lines (opening brace and
 * the early return guarded by the nb_streams_warn check) — code kept as-is. */
3577 static void report_new_stream(int input_index, AVPacket *pkt)
3579 InputFile *file = input_files[input_index];
3580 AVStream *st = file->ctx->streams[pkt->stream_index];
/* already warned about this (or a higher) stream index — presumably returns */
3582 if (pkt->stream_index < file->nb_streams_warn)
3584 av_log(file->ctx, AV_LOG_WARNING,
3585 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3586 av_get_media_type_string(st->codecpar->codec_type),
3587 input_index, pkt->stream_index,
3588 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* remember the highest stream index reported so the warning fires only once */
3589 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcoding loop:
 *  - bind complex-filtergraph outputs back to a source input stream,
 *  - record start times for frame-rate emulation (-re),
 *  - initialize all input streams and open encoders not fed by filtergraphs,
 *  - discard programs with no used streams,
 *  - write headers for outputs that have no streams,
 *  - log the human-readable stream mapping.
 * Returns 0 on success, a negative AVERROR on failure (error text logged).
 * NOTE(review): this excerpt has elided lines (braces, continue/goto paths);
 * the visible code is kept byte-identical. */
3592 static int transcode_init(void)
3594 int ret = 0, i, j, k;
3595 AVFormatContext *oc;
3598 char error[1024] = {0};
/* associate each complex-filtergraph output stream with the input stream
 * feeding the graph, when the graph has exactly one input */
3600 for (i = 0; i < nb_filtergraphs; i++) {
3601 FilterGraph *fg = filtergraphs[i];
3602 for (j = 0; j < fg->nb_outputs; j++) {
3603 OutputFilter *ofilter = fg->outputs[j];
3604 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3606 if (fg->nb_inputs != 1)
3608 for (k = nb_input_streams-1; k >= 0 ; k--)
3609 if (fg->inputs[0]->ist == input_streams[k])
3611 ofilter->ost->source_index = k;
3615 /* init framerate emulation */
3616 for (i = 0; i < nb_input_files; i++) {
3617 InputFile *ifile = input_files[i];
3618 if (ifile->rate_emu)
3619 for (j = 0; j < ifile->nb_streams; j++)
3620 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3623 /* init input streams */
3624 for (i = 0; i < nb_input_streams; i++)
3625 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* on failure, close any encoder contexts opened so far */
3626 for (i = 0; i < nb_output_streams; i++) {
3627 ost = output_streams[i];
3628 avcodec_close(ost->enc_ctx);
3633 /* open each encoder */
3634 for (i = 0; i < nb_output_streams; i++) {
3635 // skip streams fed from filtergraphs until we have a frame for them
3636 if (output_streams[i]->filter)
3639 ret = init_output_stream(output_streams[i], error, sizeof(error));
3644 /* discard unused programs */
3645 for (i = 0; i < nb_input_files; i++) {
3646 InputFile *ifile = input_files[i];
3647 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3648 AVProgram *p = ifile->ctx->programs[j];
3649 int discard = AVDISCARD_ALL;
/* keep the program if any of its streams is in use */
3651 for (k = 0; k < p->nb_stream_indexes; k++)
3652 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3653 discard = AVDISCARD_DEFAULT;
3656 p->discard = discard;
3660 /* write headers for files with no streams */
3661 for (i = 0; i < nb_output_files; i++) {
3662 oc = output_files[i]->ctx;
3663 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3664 ret = check_init_output_file(output_files[i], i);
3671 /* dump the stream mapping */
3672 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3673 for (i = 0; i < nb_input_streams; i++) {
3674 ist = input_streams[i];
3676 for (j = 0; j < ist->nb_filters; j++) {
3677 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3678 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3679 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3680 ist->filters[j]->name);
3681 if (nb_filtergraphs > 1)
3682 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3683 av_log(NULL, AV_LOG_INFO, "\n");
3688 for (i = 0; i < nb_output_streams; i++) {
3689 ost = output_streams[i];
3691 if (ost->attachment_filename) {
3692 /* an attached file */
3693 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3694 ost->attachment_filename, ost->file_index, ost->index);
3698 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3699 /* output from a complex graph */
3700 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3701 if (nb_filtergraphs > 1)
3702 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3704 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3705 ost->index, ost->enc ? ost->enc->name : "?");
/* plain input->output mapping (simple graph or streamcopy) */
3709 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3710 input_streams[ost->source_index]->file_index,
3711 input_streams[ost->source_index]->st->index,
3714 if (ost->sync_ist != input_streams[ost->source_index])
3715 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3716 ost->sync_ist->file_index,
3717 ost->sync_ist->st->index);
3718 if (ost->stream_copy)
3719 av_log(NULL, AV_LOG_INFO, " (copy)");
/* for transcoding, report codec vs. implementation ("native" when the
 * decoder/encoder name equals the codec descriptor name) */
3721 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3722 const AVCodec *out_codec = ost->enc;
3723 const char *decoder_name = "?";
3724 const char *in_codec_name = "?";
3725 const char *encoder_name = "?";
3726 const char *out_codec_name = "?";
3727 const AVCodecDescriptor *desc;
3730 decoder_name = in_codec->name;
3731 desc = avcodec_descriptor_get(in_codec->id);
3733 in_codec_name = desc->name;
3734 if (!strcmp(decoder_name, in_codec_name))
3735 decoder_name = "native";
3739 encoder_name = out_codec->name;
3740 desc = avcodec_descriptor_get(out_codec->id);
3742 out_codec_name = desc->name;
3743 if (!strcmp(encoder_name, out_codec_name))
3744 encoder_name = "native";
3747 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3748 in_codec_name, decoder_name,
3749 out_codec_name, encoder_name);
3751 av_log(NULL, AV_LOG_INFO, "\n");
/* error path: report the message accumulated in 'error' */
3755 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3759 transcode_init_done = 1;
3764 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* Return 1 if at least one output stream still wants data, 0 otherwise.
 * A stream is done when it is finished, its file hit -fsize (limit_filesize),
 * or it reached -frames (max_frames) — in the last case all streams of the
 * same output file are closed.
 * NOTE(review): the returns/continues for these branches are elided in this
 * excerpt; code kept byte-identical. */
3765 static int need_output(void)
3769 for (i = 0; i < nb_output_streams; i++) {
3770 OutputStream *ost = output_streams[i];
3771 OutputFile *of = output_files[ost->file_index];
3772 AVFormatContext *os = output_files[ost->file_index]->ctx;
3774 if (ost->finished ||
3775 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3777 if (ost->frame_number >= ost->max_frames) {
/* frame limit reached: close every stream of this output file */
3779 for (j = 0; j < of->ctx->nb_streams; j++)
3780 close_output_stream(output_streams[of->ost_index + j]);
3791 * Select the output stream to process.
3793 * @return selected output stream, or NULL if none available
/* Pick the output stream with the smallest current DTS (rescaled to a common
 * time base) so muxing stays interleaved; returns NULL when the best
 * candidate is currently unavailable. A stream with no DTS yet sorts first
 * (INT64_MIN). */
3795 static OutputStream *choose_output(void)
3798 int64_t opts_min = INT64_MAX;
3799 OutputStream *ost_min = NULL;
3801 for (i = 0; i < nb_output_streams; i++) {
3802 OutputStream *ost = output_streams[i];
3803 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3804 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3806 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3807 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* uninitialized streams with pending input are always eligible */
3809 if (!ost->initialized && !ost->inputs_done)
3812 if (!ost->finished && opts < opts_min) {
3814 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios; silently
 * does nothing if stdin is not a tty (tcgetattr fails). Used around the
 * interactive command prompt in check_keyboard_interaction(). */
3820 static void set_tty_echo(int on)
3824 if (tcgetattr(0, &tty) == 0) {
3825 if (on) tty.c_lflag |= ECHO;
3826 else tty.c_lflag &= ~ECHO;
3827 tcsetattr(0, TCSANOW, &tty);
/* Poll the keyboard (at most every 100ms when not a daemon) and act on
 * single-key commands: q to quit, +/- for verbosity, c/C to send or queue
 * filtergraph commands, d/D for debug modes, s for QP histogram, ? for help.
 * Returns AVERROR_EXIT when the user requested termination, 0 otherwise.
 * NOTE(review): several control-flow lines are elided in this excerpt;
 * code kept byte-identical. */
3832 static int check_keyboard_interaction(int64_t cur_time)
3835 static int64_t last_time;
3836 if (received_nb_signals)
3837 return AVERROR_EXIT;
3838 /* read_key() returns 0 on EOF */
3839 if(cur_time - last_time >= 100000 && !run_as_daemon){
3841 last_time = cur_time;
3845 return AVERROR_EXIT;
3846 if (key == '+') av_log_set_level(av_log_get_level()+10);
3847 if (key == '-') av_log_set_level(av_log_get_level()-10);
3848 if (key == 's') qp_hist ^= 1;
3851 do_hex_dump = do_pkt_dump = 0;
3852 } else if(do_pkt_dump){
3856 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read an interactive filter command line from the tty and
 * either send it immediately ('c') or queue it with a timestamp ('C') */
3858 if (key == 'c' || key == 'C'){
3859 char buf[4096], target[64], command[256], arg[256] = {0};
3862 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3865 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3870 fprintf(stderr, "\n");
3872 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3873 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3874 target, time, command, arg);
3875 for (i = 0; i < nb_filtergraphs; i++) {
3876 FilterGraph *fg = filtergraphs[i];
3879 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3880 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3881 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3882 } else if (key == 'c') {
3883 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3884 ret = AVERROR_PATCHWELCOME;
3886 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3888 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3893 av_log(NULL, AV_LOG_ERROR,
3894 "Parse error, at least 3 arguments were expected, "
3895 "only %d given in string '%s'\n", n, buf);
/* 'D' cycles debug flags; 'd' prompts for an explicit numeric value */
3898 if (key == 'd' || key == 'D'){
3901 debug = input_streams[0]->st->codec->debug<<1;
3902 if(!debug) debug = 1;
3903 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3910 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3915 fprintf(stderr, "\n");
3916 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3917 fprintf(stderr,"error parsing debug value\n");
/* propagate the chosen debug flags to all decoders and encoders */
3919 for(i=0;i<nb_input_streams;i++) {
3920 input_streams[i]->st->codec->debug = debug;
3922 for(i=0;i<nb_output_streams;i++) {
3923 OutputStream *ost = output_streams[i];
3924 ost->enc_ctx->debug = debug;
3926 if(debug) av_log_set_level(AV_LOG_DEBUG);
3927 fprintf(stderr,"debug=%d\n", debug);
3930 fprintf(stderr, "key function\n"
3931 "? show this help\n"
3932 "+ increase verbosity\n"
3933 "- decrease verbosity\n"
3934 "c Send command to first matching filter supporting it\n"
3935 "C Send/Queue command to all matching filters\n"
3936 "D cycle through available debug modes\n"
3937 "h dump packets/hex press to cycle through the 3 states\n"
3939 "s Show QP histogram\n"
/* Per-input-file reader thread: loop av_read_frame() and forward packets to
 * the main thread through f->in_thread_queue. In non-blocking mode a full
 * queue triggers a warning suggesting a larger -thread_queue_size, then a
 * blocking retry. On read error/EOF the error is stored on the queue for the
 * receiver. NOTE(review): loop/brace lines are elided in this excerpt. */
3946 static void *input_thread(void *arg)
3949 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3954 ret = av_read_frame(f->ctx, &pkt);
3956 if (ret == AVERROR(EAGAIN)) {
/* propagate the read error to the consumer side of the queue */
3961 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3964 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3965 if (flags && ret == AVERROR(EAGAIN)) {
/* queue full: retry blocking and tell the user how to avoid this */
3967 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3968 av_log(f->ctx, AV_LOG_WARNING,
3969 "Thread message queue blocking; consider raising the "
3970 "thread_queue_size option (current value: %d)\n",
3971 f->thread_queue_size);
3974 if (ret != AVERROR_EOF)
3975 av_log(f->ctx, AV_LOG_ERROR,
3976 "Unable to send packet to main thread: %s\n",
3978 av_packet_unref(&pkt);
3979 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Tear down all input reader threads: signal EOF to senders, drain and unref
 * any packets still queued, join each thread, then free the queue. Safe to
 * call when threads were never created (NULL queue is skipped). */
3987 static void free_input_threads(void)
3991 for (i = 0; i < nb_input_files; i++) {
3992 InputFile *f = input_files[i];
3995 if (!f || !f->in_thread_queue)
3997 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain remaining packets so the sender can exit and no buffers leak */
3998 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3999 av_packet_unref(&pkt);
4001 pthread_join(f->thread, NULL);
4003 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one reader thread per input file (skipped entirely for a single
 * input). Non-seekable inputs other than lavfi are marked non-blocking so a
 * slow consumer cannot stall a live source. Returns 0 or a negative AVERROR
 * (queue allocation or pthread_create failure). */
4007 static int init_input_threads(void)
4011 if (nb_input_files == 1)
4014 for (i = 0; i < nb_input_files; i++) {
4015 InputFile *f = input_files[i];
4017 if (f->ctx->pb ? !f->ctx->pb->seekable :
4018 strcmp(f->ctx->iformat->name, "lavfi"))
4019 f->non_blocking = 1;
4020 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4021 f->thread_queue_size, sizeof(AVPacket));
4025 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4026 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4027 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not -1/errno */
4028 return AVERROR(ret);
/* Receive one packet from the input file's reader-thread queue; uses
 * non-blocking receive when the file is marked non_blocking (the condition
 * line appears elided in this excerpt). */
4034 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4036 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4038 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file f. Under -re (rate emulation, the
 * guarding condition is elided here) returns EAGAIN while any stream of the
 * file is ahead of wall-clock time. With multiple inputs, packets come from
 * the reader thread; otherwise straight from av_read_frame(). */
4042 static int get_input_packet(InputFile *f, AVPacket *pkt)
4046 for (i = 0; i < f->nb_streams; i++) {
4047 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream DTS (scaled to microseconds) against elapsed real time */
4048 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4049 int64_t now = av_gettime_relative() - ist->start;
4051 return AVERROR(EAGAIN);
4056 if (nb_input_files > 1)
4057 return get_input_packet_mt(f, pkt);
4059 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable
 * (i.e. its input returned EAGAIN). NOTE(review): the return statements
 * are elided in this excerpt. */
4062 static int got_eagain(void)
4065 for (i = 0; i < nb_output_streams; i++)
4066 if (output_streams[i]->unavailable)
/* Clear the transient EAGAIN state on every input file and the matching
 * 'unavailable' flag on every output stream, so the next transcode_step()
 * retries all of them. */
4071 static void reset_eagain(void)
4074 for (i = 0; i < nb_input_files; i++)
4075 input_files[i]->eagain = 0;
4076 for (i = 0; i < nb_output_streams; i++)
4077 output_streams[i]->unavailable = 0;
4080 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Keep *duration at max(tmp, *duration) across (possibly different) time
 * bases, comparing with av_compare_ts(); returns the time base that the
 * stored maximum is expressed in. Used by seek_to_start() to track the
 * longest stream of a looped input. NOTE(review): the assignment branches
 * are elided in this excerpt. */
4081 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4082 AVRational time_base)
4088 return tmp_time_base;
4091 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4094 return tmp_time_base;
/* Rewind an input file for -stream_loop: seek back to start_time, flush all
 * decoders, then estimate the file's total duration (max over streams of
 * max_pts - min_pts plus one last-frame duration) so timestamps of the next
 * loop iteration can be offset correctly. When an audio stream decoded any
 * samples, video last-frame length is ignored (audio is more precise).
 * Returns the av_seek_frame() error on failure.
 * NOTE(review): brace/continue lines are elided in this excerpt. */
4100 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4103 AVCodecContext *avctx;
4104 int i, ret, has_audio = 0;
4105 int64_t duration = 0;
4107 ret = av_seek_frame(is, -1, is->start_time, 0);
4111 for (i = 0; i < ifile->nb_streams; i++) {
4112 ist = input_streams[ifile->ist_index + i];
4113 avctx = ist->dec_ctx;
/* flush each active decoder before replaying packets from the start */
4116 if (ist->decoding_needed) {
4117 process_input_packet(ist, NULL, 1);
4118 avcodec_flush_buffers(avctx);
4121 /* duration is the length of the last frame in a stream
4122 * when audio stream is present we don't care about
4123 * last video frame length because it's not defined exactly */
4124 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4128 for (i = 0; i < ifile->nb_streams; i++) {
4129 ist = input_streams[ifile->ist_index + i];
4130 avctx = ist->dec_ctx;
4133 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
/* last audio frame length derived from the decoded sample count */
4134 AVRational sample_rate = {1, avctx->sample_rate};
4136 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* video: one frame at the configured or average frame rate, else 1 tick */
4140 if (ist->framerate.num) {
4141 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4142 } else if (ist->st->avg_frame_rate.num) {
4143 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4144 } else duration = 1;
4146 if (!ifile->duration)
4147 ifile->time_base = ist->st->time_base;
4148 /* the total duration of the stream, max_pts - min_pts is
4149 * the duration of the stream without the last frame */
4150 duration += ist->max_pts - ist->min_pts;
4151 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* decrement the remaining loop count when it is finite */
4155 if (ifile->loop > 0)
4163 * - 0 -- one packet was read and processed
4164 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4165 * this function should be called again
4166 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file.
 * Handles: EAGAIN/EOF (including -stream_loop rewind via seek_to_start),
 * flushing decoders and finishing outputs at EOF, ignoring packets from
 * streams that appeared after open, timestamp wrap correction, injecting
 * stream-global side data into the first packet, ts_offset/ts_scale
 * application, discontinuity detection and correction, and finally feeding
 * the packet to process_input_packet().
 * Return: 0 after one packet processed, AVERROR(EAGAIN) to retry,
 * AVERROR_EOF when this file is exhausted.
 * NOTE(review): numerous control-flow lines (braces, returns, continues,
 * some conditions) are elided in this excerpt; code kept byte-identical. */
4168 static int process_input(int file_index)
4170 InputFile *ifile = input_files[file_index];
4171 AVFormatContext *is;
4179 ret = get_input_packet(ifile, &pkt);
4181 if (ret == AVERROR(EAGAIN)) {
/* input failed/ended but -stream_loop is active: rewind and retry */
4185 if (ret < 0 && ifile->loop) {
4186 if ((ret = seek_to_start(ifile, is)) < 0)
4188 ret = get_input_packet(ifile, &pkt);
4189 if (ret == AVERROR(EAGAIN)) {
4195 if (ret != AVERROR_EOF) {
4196 print_error(is->filename, ret);
/* EOF: flush every decoder of this file once with a NULL packet */
4201 for (i = 0; i < ifile->nb_streams; i++) {
4202 ist = input_streams[ifile->ist_index + i];
4203 if (ist->decoding_needed) {
4204 ret = process_input_packet(ist, NULL, 0);
4209 /* mark all outputs that don't go through lavfi as finished */
4210 for (j = 0; j < nb_output_streams; j++) {
4211 OutputStream *ost = output_streams[j];
4213 if (ost->source_index == ifile->ist_index + i &&
4214 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4215 finish_output_stream(ost);
4219 ifile->eof_reached = 1;
4220 return AVERROR(EAGAIN);
4226 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4227 is->streams[pkt.stream_index]);
4229 /* the following test is needed in case new streams appear
4230 dynamically in stream : we ignore them */
4231 if (pkt.stream_index >= ifile->nb_streams) {
4232 report_new_stream(file_index, &pkt);
4233 goto discard_packet;
4236 ist = input_streams[ifile->ist_index + pkt.stream_index];
4238 ist->data_size += pkt.size;
/* discarded stream (condition elided in this excerpt) */
4242 goto discard_packet;
4244 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4245 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* verbose trace of raw demuxer timestamps (debug_ts, condition elided) */
4250 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4251 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4252 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4253 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4254 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4255 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4256 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4257 av_ts2str(input_files[ist->file_index]->ts_offset),
4258 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for streams with < 64 pts_wrap_bits */
4261 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4262 int64_t stime, stime2;
4263 // Correcting starttime based on the enabled streams
4264 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4265 // so we instead do it here as part of discontinuity handling
4266 if ( ist->next_dts == AV_NOPTS_VALUE
4267 && ifile->ts_offset == -is->start_time
4268 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4269 int64_t new_start_time = INT64_MAX;
4270 for (i=0; i<is->nb_streams; i++) {
4271 AVStream *st = is->streams[i];
4272 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4274 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4276 if (new_start_time > is->start_time) {
4277 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4278 ifile->ts_offset = -new_start_time;
4282 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4283 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4284 ist->wrap_correction_done = 1;
/* undo the wrap on timestamps that are more than half a wrap period ahead */
4286 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4287 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4288 ist->wrap_correction_done = 0;
4290 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4291 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4292 ist->wrap_correction_done = 0;
4296 /* add the stream-global side data to the first packet */
4297 if (ist->nb_packets == 1) {
4298 for (i = 0; i < ist->st->nb_side_data; i++) {
4299 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* displaymatrix is handled via avfilter/autorotate, not packet side data */
4302 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4305 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4308 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4312 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the per-file timestamp offset, then the user -itsscale factor */
4316 if (pkt.dts != AV_NOPTS_VALUE)
4317 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4318 if (pkt.pts != AV_NOPTS_VALUE)
4319 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4321 if (pkt.pts != AV_NOPTS_VALUE)
4322 pkt.pts *= ist->ts_scale;
4323 if (pkt.dts != AV_NOPTS_VALUE)
4324 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check (first DTS of a stream vs file last_ts) */
4326 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4327 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4328 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4329 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4330 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4331 int64_t delta = pkt_dts - ifile->last_ts;
4332 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4333 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4334 ifile->ts_offset -= delta;
4335 av_log(NULL, AV_LOG_DEBUG,
4336 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4337 delta, ifile->ts_offset);
4338 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4339 if (pkt.pts != AV_NOPTS_VALUE)
4340 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* offset timestamps by the accumulated duration of previous loop passes */
4344 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4345 if (pkt.pts != AV_NOPTS_VALUE) {
4346 pkt.pts += duration;
4347 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4348 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4351 if (pkt.dts != AV_NOPTS_VALUE)
4352 pkt.dts += duration;
/* intra-stream discontinuity check against the predicted next_dts */
4354 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4355 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4356 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4357 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4359 int64_t delta = pkt_dts - ist->next_dts;
4360 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4361 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4362 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4363 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4364 ifile->ts_offset -= delta;
4365 av_log(NULL, AV_LOG_DEBUG,
4366 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4367 delta, ifile->ts_offset);
4368 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4369 if (pkt.pts != AV_NOPTS_VALUE)
4370 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: drop timestamps that are beyond the error threshold */
4373 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4374 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4375 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4376 pkt.dts = AV_NOPTS_VALUE;
4378 if (pkt.pts != AV_NOPTS_VALUE){
4379 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4380 delta = pkt_pts - ist->next_dts;
4381 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4382 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4383 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4384 pkt.pts = AV_NOPTS_VALUE;
4390 if (pkt.dts != AV_NOPTS_VALUE)
4391 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4394 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4395 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4396 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4397 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4398 av_ts2str(input_files[ist->file_index]->ts_offset),
4399 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4402 sub2video_heartbeat(ist, pkt.pts);
4404 process_input_packet(ist, &pkt, 0);
4407 av_packet_unref(&pkt);
4413 * Perform a step of transcoding for the specified filter graph.
4415 * @param[in] graph filter graph to consider
4416 * @param[out] best_ist input stream where a frame would allow to continue
4417 * @return 0 for success, <0 for error
/* Advance one filtergraph: request the oldest frame; on success reap the
 * produced frames, on EOF flush and close the graph's output streams, on
 * EAGAIN pick the input stream whose buffersrc reported the most failed
 * requests (*best_ist) so the caller knows which input to feed next. When
 * all graph inputs are unavailable/at EOF, the graph's outputs are marked
 * unavailable instead. NOTE(review): some branch lines are elided. */
4419 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4422 int nb_requests, nb_requests_max = 0;
4423 InputFilter *ifilter;
4427 ret = avfilter_graph_request_oldest(graph->graph);
4429 return reap_filters(0);
4431 if (ret == AVERROR_EOF) {
4432 ret = reap_filters(1);
4433 for (i = 0; i < graph->nb_outputs; i++)
4434 close_output_stream(graph->outputs[i]->ost);
4437 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the input the graph is hungriest for */
4440 for (i = 0; i < graph->nb_inputs; i++) {
4441 ifilter = graph->inputs[i];
4443 if (input_files[ist->file_index]->eagain ||
4444 input_files[ist->file_index]->eof_reached)
4446 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4447 if (nb_requests > nb_requests_max) {
4448 nb_requests_max = nb_requests;
/* no feedable input: stall this graph's outputs for now */
4454 for (i = 0; i < graph->nb_outputs; i++)
4455 graph->outputs[i]->ost->unavailable = 1;
4461 * Run a single step of transcoding.
4463 * @return 0 for success, <0 for error
/* One iteration of the main loop: choose the neediest output stream,
 * (re)configure its filtergraph once all input formats are known, determine
 * which input stream to read from (via transcode_from_filter for filtered
 * outputs, or source_index directly), pull one input packet, then reap any
 * frames the filters produced. Returns 0 on progress/EOF, <0 on error.
 * NOTE(review): some branch/return lines are elided in this excerpt. */
4465 static int transcode_step(void)
4468 InputStream *ist = NULL;
4471 ost = choose_output();
4478 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtergraph not configured yet: configure once all inputs know formats */
4482 if (ost->filter && !ost->filter->graph->graph) {
4483 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4484 ret = configure_filtergraph(ost->filter->graph);
4486 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4492 if (ost->filter && ost->filter->graph->graph) {
4493 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4497 } else if (ost->filter) {
/* unconfigured graph: look for an input that can still deliver a frame */
4499 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4500 InputFilter *ifilter = ost->filter->graph->inputs[i];
4501 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* all inputs exhausted without output: mark the stream done */
4507 ost->inputs_done = 1;
4511 av_assert0(ost->source_index >= 0);
4512 ist = input_streams[ost->source_index];
4515 ret = process_input(ist->file_index);
4516 if (ret == AVERROR(EAGAIN)) {
4517 if (input_files[ist->file_index]->eagain)
4518 ost->unavailable = 1;
4523 return ret == AVERROR_EOF ? 0 : ret;
4525 return reap_filters(0);
4529 * The following code is the main loop of the file converter
/* Main conversion driver: run transcode_init(), optionally start input
 * threads and keyboard interaction, loop transcode_step() until no output
 * needs data or a signal arrives, then flush decoders, write trailers,
 * close encoders/decoders and free per-stream resources. Returns 0 on
 * success or a negative AVERROR. NOTE(review): labels (fail:), several
 * returns and closing braces are elided in this excerpt. */
4531 static int transcode(void)
4534 AVFormatContext *os;
4537 int64_t timer_start;
4538 int64_t total_packets_written = 0;
4540 ret = transcode_init();
4544 if (stdin_interaction) {
4545 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4548 timer_start = av_gettime_relative();
4551 if ((ret = init_input_threads()) < 0)
/* main loop: runs until a termination signal or no output wants data */
4555 while (!received_sigterm) {
4556 int64_t cur_time= av_gettime_relative();
4558 /* if 'q' pressed, exits */
4559 if (stdin_interaction)
4560 if (check_keyboard_interaction(cur_time) < 0)
4563 /* check if there's any stream where output is still needed */
4564 if (!need_output()) {
4565 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4569 ret = transcode_step();
4570 if (ret < 0 && ret != AVERROR_EOF) {
4572 av_strerror(ret, errbuf, sizeof(errbuf));
4574 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4578 /* dump report by using the output first video and audio streams */
4579 print_report(0, timer_start, cur_time);
4582 free_input_threads();
4585 /* at the end of stream, we must flush the decoder buffers */
4586 for (i = 0; i < nb_input_streams; i++) {
4587 ist = input_streams[i];
4588 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4589 process_input_packet(ist, NULL, 0);
4596 /* write the trailer if needed and close file */
4597 for (i = 0; i < nb_output_files; i++) {
4598 os = output_files[i]->ctx;
4599 if (!output_files[i]->header_written) {
4600 av_log(NULL, AV_LOG_ERROR,
4601 "Nothing was written into output file %d (%s), because "
4602 "at least one of its streams received no packets.\n",
4606 if ((ret = av_write_trailer(os)) < 0) {
4607 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4613 /* dump report by using the first video and audio streams */
4614 print_report(1, timer_start, av_gettime_relative());
4616 /* close each encoder */
4617 for (i = 0; i < nb_output_streams; i++) {
4618 ost = output_streams[i];
4619 if (ost->encoding_needed) {
4620 av_freep(&ost->enc_ctx->stats_in);
4622 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing at all was written */
4625 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4626 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4630 /* close each decoder */
4631 for (i = 0; i < nb_input_streams; i++) {
4632 ist = input_streams[i];
4633 if (ist->decoding_needed) {
4634 avcodec_close(ist->dec_ctx);
4635 if (ist->hwaccel_uninit)
4636 ist->hwaccel_uninit(ist->dec_ctx);
4640 av_buffer_unref(&hw_device_ctx);
/* failure/cleanup path: release per-output-stream resources */
4647 free_input_threads();
4650 if (output_streams) {
4651 for (i = 0; i < nb_output_streams; i++) {
4652 ost = output_streams[i];
4655 if (fclose(ost->logfile))
4656 av_log(NULL, AV_LOG_ERROR,
4657 "Error closing logfile, loss of information possible: %s\n",
4658 av_err2str(AVERROR(errno)));
4659 ost->logfile = NULL;
4661 av_freep(&ost->forced_kf_pts);
4662 av_freep(&ost->apad);
4663 av_freep(&ost->disposition);
4664 av_dict_free(&ost->encoder_opts);
4665 av_dict_free(&ost->sws_dict);
4666 av_dict_free(&ost->swr_opts);
4667 av_dict_free(&ost->resample_opts);
/**
 * Return the user-mode CPU time consumed by this process, in microseconds.
 *
 * Used for the -benchmark report. On POSIX systems this reads
 * getrusage(RUSAGE_SELF); on Windows it reads GetProcessTimes(). On
 * platforms with neither API it falls back to wall-clock time from
 * av_gettime_relative(), which overestimates CPU usage but keeps the
 * benchmark output meaningful.
 */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100 ns intervals; divide by 10 for microseconds. */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
/**
 * Return the peak ("maximum resident set size") memory used by this
 * process, in bytes, or -1 if the platform provides no way to query it.
 *
 * POSIX: ru_maxrss from getrusage() is reported in kilobytes, hence the
 * * 1024 scaling. Windows: PeakPagefileUsage from GetProcessMemoryInfo()
 * is already in bytes. Used for the -benchmark "maxrss" report.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    /* cb must be initialized to the structure size before the call. */
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return -1;
#endif
}
/**
 * No-op av_log() callback, installed by the -d (daemon) mode in main()
 * to suppress all log output. Parameters match the av_log_set_callback()
 * signature and are intentionally ignored.
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4715 int main(int argc, char **argv)
4722 register_exit(ffmpeg_cleanup);
4724 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4726 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4727 parse_loglevel(argc, argv, options);
4729 if(argc>1 && !strcmp(argv[1], "-d")){
4731 av_log_set_callback(log_callback_null);
4736 avcodec_register_all();
4738 avdevice_register_all();
4740 avfilter_register_all();
4742 avformat_network_init();
4744 show_banner(argc, argv, options);
4746 /* parse options and open all input/output files */
4747 ret = ffmpeg_parse_options(argc, argv);
4751 if (nb_output_files <= 0 && nb_input_files == 0) {
4753 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4757 /* file converter / grab */
4758 if (nb_output_files <= 0) {
4759 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4763 // if (nb_input_files == 0) {
4764 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4768 for (i = 0; i < nb_output_files; i++) {
4769 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4773 current_time = ti = getutime();
4774 if (transcode() < 0)
4776 ti = getutime() - ti;
4778 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4780 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4781 decode_error_stat[0], decode_error_stat[1]);
4782 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4785 exit_program(received_nb_signals ? 255 : main_return_code);
4786 return main_return_code;