2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/hwcontext.h"
51 #include "libavutil/internal.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/display.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily. */
114 static FILE *vstats_file;
/* Names of the constants usable in -force_key_frames expressions. */
116 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
128 static int ifilter_has_all_input_formats(FilterGraph *fg);
/* Runtime state / statistics shared across the transcoding loop. */
130 static int run_as_daemon  = 0;
131 static int nb_frames_dup = 0;
132 static unsigned dup_warning = 1000;
133 static int nb_frames_drop = 0;
134 static int64_t decode_error_stat[2];
136 static int want_sdp = 1;
138 static int current_time;
139 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (allocated on demand). */
141 static uint8_t *subtitle_out;
/* Global arrays of input/output streams and files, owned by this file. */
143 InputStream **input_streams = NULL;
144 int        nb_input_streams = 0;
145 InputFile   **input_files   = NULL;
146 int        nb_input_files   = 0;
148 OutputStream **output_streams = NULL;
149 int         nb_output_streams = 0;
150 OutputFile   **output_files   = NULL;
151 int         nb_output_files   = 0;
153 FilterGraph **filtergraphs;
158 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored at exit when restore_tty is set. */
159 static struct termios oldtty;
160 static int restore_tty;
164 static void free_input_threads(void);
168 Convert subtitles to video with alpha to insert them in filter graphs.
169 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank (fully transparent) RGB32 canvas
 * sized from the decoder when available, else from the sub2video defaults.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer(). */
172 static int sub2video_get_blank_frame(InputStream *ist)
175     AVFrame *frame = ist->sub2video.frame;
177     av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to sub2video.w/h when unset. */
178     ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
179     ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
180     ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
181     if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero the pixel data: RGB32 value 0 is fully transparent black. */
183     memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle into the RGB32 canvas.
 * dst/dst_linesize describe the canvas of size w x h; rectangles that are
 * not bitmaps or that fall outside the canvas are skipped with a warning. */
187 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
190     uint32_t *pal, *dst2;
194     if (r->type != SUBTITLE_BITMAP) {
195         av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Reject rectangles overflowing the canvas instead of writing out of bounds. */
198     if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
199         av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
200                r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
205     dst += r->y * dst_linesize + r->x * 4;
207     pal = (uint32_t *)r->data[1];
208     for (y = 0; y < r->h; y++) {
209         dst2 = (uint32_t *)dst;
/* Expand each palette index into its RGB32 color. */
211         for (x = 0; x < r->w; x++)
212             *(dst2++) = pal[*(src2++)];
214         src += r->linesize[0];
/* Push the current sub2video canvas (with the given pts) into every filter
 * fed by this input stream, keeping our own reference to the frame. */
218 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220     AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
223     av_assert1(frame->data[0]);
224     ist->sub2video.last_pts = frame->pts = pts;
225     for (i = 0; i < ist->nb_filters; i++)
/* KEEP_REF: the frame is reused for later heartbeats; PUSH: process now. */
226         av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
227                                      AV_BUFFERSRC_FLAG_KEEP_REF |
228                                      AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it to the
 * filters. With sub == NULL, pushes an empty (clearing) frame instead —
 * used by the heartbeat/flush paths. Display times (in ms) are converted
 * to the input stream time base for the frame pts and end_pts. */
231 void sub2video_update(InputStream *ist, AVSubtitle *sub)
233     AVFrame *frame = ist->sub2video.frame;
237     int64_t pts, end_pts;
/* start/end_display_time are milliseconds relative to sub->pts (AV_TIME_BASE). */
242         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
243                                  AV_TIME_BASE_Q, ist->st->time_base);
244         end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
245                                  AV_TIME_BASE_Q, ist->st->time_base);
246         num_rects = sub->num_rects;
/* NULL subtitle: reuse the previous end pts as the clear frame's pts. */
248         pts       = ist->sub2video.end_pts;
252     if (sub2video_get_blank_frame(ist) < 0) {
253         av_log(ist->dec_ctx, AV_LOG_ERROR,
254                "Impossible to get a blank canvas.\n");
257     dst          = frame->data    [0];
258     dst_linesize = frame->linesize[0];
259     for (i = 0; i < num_rects; i++)
260         sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
261     sub2video_push_ref(ist, pts);
262     ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame for every subtitle stream in the same
 * input file whenever a frame is read, so video filters waiting on a
 * subtitle input (e.g. overlay) are never starved. pts is in ist's
 * time base and is rescaled per target stream. */
265 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267     InputFile *infile = input_files[ist->file_index];
271     /* When a frame is read from a file, examine all sub2video streams in
272        the same file and send the sub2video frame again. Otherwise, decoded
273        video frames could be accumulating in the filter graph while a filter
274        (possibly overlay) is desperately waiting for a subtitle frame. */
275     for (i = 0; i < infile->nb_streams; i++) {
276         InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
277         if (!ist2->sub2video.frame)
279         /* subtitles seem to be usually muxed ahead of other streams;
280            if not, subtracting a larger time here is necessary */
281         pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
282         /* do not send the heartbeat frame if the subtitle is already ahead */
283         if (pts2 <= ist2->sub2video.last_pts)
/* Current canvas expired (or never drawn): push a clearing frame first. */
285         if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
286             sub2video_update(ist2, NULL);
287         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
288             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290             sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: clear any still-displayed
 * subtitle, then send EOF (NULL frame) to all attached buffer sources. */
294 static void sub2video_flush(InputStream *ist)
298     if (ist->sub2video.end_pts < INT64_MAX)
299         sub2video_update(ist, NULL);
300     for (i = 0; i < ist->nb_filters; i++)
301         av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
304 /* end of sub2video hack */
/* Async-signal-safe terminal restoration: put back the tty attributes saved
 * by term_init() and silence further logging. Callable from signal handlers. */
306 static void term_exit_sigsafe(void)
310         tcsetattr (0, TCSANOW, &oldtty);
316     av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handlers and the main
 * transcode loop; volatile because they are written from handler context. */
320 static volatile int received_sigterm = 0;
321 static volatile int received_nb_signals = 0;
322 static volatile int transcode_init_done = 0;
323 static volatile int ffmpeg_exited = 0;
324 static int main_return_code = 0;
/* Signal handler: record the signal and count occurrences; after more than
 * three signals, write a message and hard-exit (only async-signal-safe
 * calls are used here). */
327 sigterm_handler(int sig)
329     received_sigterm = sig;
330     received_nb_signals++;
332     if(received_nb_signals > 3) {
333         write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
334               strlen("Received > 3 system signals, hard exiting\n"));
340 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * sigterm_handler() path used on POSIX systems. */
341 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
343     av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
348     case CTRL_BREAK_EVENT:
349         sigterm_handler(SIGINT);
352     case CTRL_CLOSE_EVENT:
353     case CTRL_LOGOFF_EVENT:
354     case CTRL_SHUTDOWN_EVENT:
355         sigterm_handler(SIGTERM);
356         /* Basically, with these 3 events, when we return from this method the
357            process is hard terminated, so stall as long as we need to
358            to try and let the main thread(s) clean up and gracefully terminate
359            (we have at most 5 seconds, but should be done far before that). */
/* Spin until the main thread has finished its cleanup. */
360         while (!ffmpeg_exited) {
366         av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): these statements are the interior of term_init(): put the
 * controlling tty into raw-ish mode for interactive key handling, then
 * install the signal handlers. */
375     if (!run_as_daemon && stdin_interaction) {
377         if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control and echo/canonical mode so single
 * key presses are delivered immediately (see read_key()). */
381             tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
382                           |INLCR|IGNCR|ICRNL|IXON);
383             tty.c_oflag |= OPOST;
384             tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
385             tty.c_cflag &= ~(CSIZE|PARENB);
390             tcsetattr (0, TCSANOW, &tty);
392         signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
396     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
397     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
399     signal(SIGXCPU, sigterm_handler);
401 #if HAVE_SETCONSOLECTRLHANDLER
402     SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
406 /* read a key without blocking */
/* Returns the next key press if one is pending, without blocking.
 * POSIX: poll stdin with select(); Windows: peek at the console/pipe. */
407 static int read_key(void)
419     n = select(1, &rfds, NULL, NULL, &tv);
428 #    if HAVE_PEEKNAMEDPIPE
430     static HANDLE input_handle;
433         input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode() fails when stdin is a pipe rather than a console. */
434         is_pipe = !GetConsoleMode(input_handle, &dw);
438         /* When running under a GUI, you will end here. */
439         if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
440             // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a termination signal has
 * been received (two signals are tolerated while still initializing). */
458 static int decode_interrupt_cb(void *ctx)
460     return received_nb_signals > transcode_init_done;
463 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown run at program exit: drain and free every filter graph,
 * output file/stream, input file/stream and auxiliary buffer, close the
 * vstats file, deinit the network layer, and report why we are exiting.
 * ret is the pending exit status (used only for the final log message). */
465 static void ffmpeg_cleanup(int ret)
470         int maxrss = getmaxrss() / 1024;
471         av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free filter graphs, draining queued frames and subtitles first. */
474     for (i = 0; i < nb_filtergraphs; i++) {
475         FilterGraph *fg = filtergraphs[i];
476         avfilter_graph_free(&fg->graph);
477         for (j = 0; j < fg->nb_inputs; j++) {
478             while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480                 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
481                                      sizeof(frame), NULL);
482                 av_frame_free(&frame);
484             av_fifo_free(fg->inputs[j]->frame_queue);
485             if (fg->inputs[j]->ist->sub2video.sub_queue) {
486                 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488                     av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
489                                          &sub, sizeof(sub), NULL);
490                     avsubtitle_free(&sub);
492                 av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
494             av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
495             av_freep(&fg->inputs[j]->name);
496             av_freep(&fg->inputs[j]);
498         av_freep(&fg->inputs);
499         for (j = 0; j < fg->nb_outputs; j++) {
500             av_freep(&fg->outputs[j]->name);
501             av_freep(&fg->outputs[j]->formats);
502             av_freep(&fg->outputs[j]->channel_layouts);
503             av_freep(&fg->outputs[j]->sample_rates);
504             av_freep(&fg->outputs[j]);
506         av_freep(&fg->outputs);
507         av_freep(&fg->graph_desc);
509         av_freep(&filtergraphs[i]);
511     av_freep(&filtergraphs);
513     av_freep(&subtitle_out);
/* Close and free output files (and their AVIO contexts when we opened one). */
516     for (i = 0; i < nb_output_files; i++) {
517         OutputFile *of = output_files[i];
522         if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524         avformat_free_context(s);
525         av_dict_free(&of->opts);
527         av_freep(&output_files[i]);
/* Free per-output-stream state: bitstream filters, frames, parsers, queues. */
529     for (i = 0; i < nb_output_streams; i++) {
530         OutputStream *ost = output_streams[i];
535         for (j = 0; j < ost->nb_bitstream_filters; j++)
536             av_bsf_free(&ost->bsf_ctx[j]);
537         av_freep(&ost->bsf_ctx);
538         av_freep(&ost->bsf_extradata_updated);
540         av_frame_free(&ost->filtered_frame);
541         av_frame_free(&ost->last_frame);
542         av_dict_free(&ost->encoder_opts);
544         av_parser_close(ost->parser);
545         avcodec_free_context(&ost->parser_avctx);
547         av_freep(&ost->forced_keyframes);
548         av_expr_free(ost->forced_keyframes_pexpr);
549         av_freep(&ost->avfilter);
550         av_freep(&ost->logfile_prefix);
552         av_freep(&ost->audio_channels_map);
553         ost->audio_channels_mapped = 0;
555         av_dict_free(&ost->sws_dict);
557         avcodec_free_context(&ost->enc_ctx);
558         avcodec_parameters_free(&ost->ref_par);
/* Unreference any packets still waiting in the pre-header muxing queue. */
560         if (ost->muxing_queue) {
561             while (av_fifo_size(ost->muxing_queue)) {
563                 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564                 av_packet_unref(&pkt);
566             av_fifo_freep(&ost->muxing_queue);
569         av_freep(&output_streams[i]);
/* Stop reader threads before closing the inputs they read from. */
572     free_input_threads();
574     for (i = 0; i < nb_input_files; i++) {
575         avformat_close_input(&input_files[i]->ctx);
576         av_freep(&input_files[i]);
578     for (i = 0; i < nb_input_streams; i++) {
579         InputStream *ist = input_streams[i];
581         av_frame_free(&ist->decoded_frame);
582         av_frame_free(&ist->filter_frame);
583         av_dict_free(&ist->decoder_opts);
584         avsubtitle_free(&ist->prev_sub.subtitle);
585         av_frame_free(&ist->sub2video.frame);
586         av_freep(&ist->filters);
587         av_freep(&ist->hwaccel_device);
588         av_freep(&ist->dts_buffer);
590         avcodec_free_context(&ist->dec_ctx);
592         av_freep(&input_streams[i]);
/* fclose() flushes; a failure here may mean lost statistics, so report it. */
596         if (fclose(vstats_file))
597             av_log(NULL, AV_LOG_ERROR,
598                    "Error closing vstats file, loss of information possible: %s\n",
599                    av_err2str(AVERROR(errno)));
601     av_freep(&vstats_filename);
603     av_freep(&input_streams);
604     av_freep(&input_files);
605     av_freep(&output_streams);
606     av_freep(&output_files);
610     avformat_network_deinit();
612     if (received_sigterm) {
613         av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614                (int) received_sigterm);
615     } else if (ret && transcode_init_done) {
616         av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * component it was given to (i.e. the user passed an unknown option). */
631 void assert_avoptions(AVDictionary *m)
633     AVDictionaryEntry *t;
634     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort when an experimental codec is used without -strict experimental;
 * encoder selects the encoder/decoder wording of the message. */
640 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all, log the user CPU time spent since the previous call
 * under the printf-style label fmt; a NULL fmt only resets the timer. */
645 static void update_benchmark(const char *fmt, ...)
647     if (do_benchmark_all) {
648         int64_t t = getutime();
654             vsnprintf(buf, sizeof(buf), fmt, va);
656             av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
/* Send one encoded packet to the muxer for stream ost of output file of.
 * Before the muxer header is written, packets are buffered in a bounded
 * FIFO instead. Handles -vsync drop / async timestamp stripping, frame
 * counting against -frames, quality side data, CFR duration override,
 * time-base rescaling and non-monotonic DTS correction. Consumes pkt. */
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
673     AVFormatContext *s = of->ctx;
674     AVStream *st = ost->st;
677     if (!of->header_written) {
678         AVPacket tmp_pkt = {0};
679         /* the muxer is not initialized yet, buffer the packet */
/* Grow the queue geometrically up to -max_muxing_queue_size; overflow is fatal
 * for this output (too many packets before all streams were initialized). */
680         if (!av_fifo_space(ost->muxing_queue)) {
681             int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
682                                  ost->max_muxing_queue_size);
683             if (new_size <= av_fifo_size(ost->muxing_queue)) {
684                 av_log(NULL, AV_LOG_ERROR,
685                        "Too many packets buffered for output stream %d:%d.\n",
686                        ost->file_index, ost->st->index);
689             ret = av_fifo_realloc2(ost->muxing_queue, new_size);
/* Store a reference so the caller's packet can be released now. */
693         ret = av_packet_ref(&tmp_pkt, pkt);
696         av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
697         av_packet_unref(pkt);
/* vsync drop / negative async: let the muxer regenerate the timestamps. */
701     if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
702         (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
703         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
706      * Audio encoders may split the packets --  #frames in != #packets out.
707      * But there is no reordering, so we can limit the number of output packets
708      * by simply dropping them here.
709      * Counting encoded video frames needs to be done separately because of
710      * reordering, see do_video_out()
712     if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
713         if (ost->frame_number >= ost->max_frames) {
714             av_packet_unref(pkt);
/* Extract per-frame quality/error stats attached by the encoder. */
719     if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
721         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
723         ost->quality = sd ? AV_RL32(sd) : -1;
724         ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
726         for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
728                 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* For CFR output, force the duration implied by the frame rate. */
733         if (ost->frame_rate.num && ost->is_cfr) {
734             if (pkt->duration > 0)
735                 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
736             pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
741     av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
743     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
744         if (pkt->dts != AV_NOPTS_VALUE &&
745             pkt->pts != AV_NOPTS_VALUE &&
746             pkt->dts > pkt->pts) {
747             av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
749                    ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_dts+1} (sum minus min and max). */
751             pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
752                      - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
753                      - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless the muxer allows equal) increasing DTS. */
755         if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
756             pkt->dts != AV_NOPTS_VALUE &&
757             !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
758             ost->last_mux_dts != AV_NOPTS_VALUE) {
759             int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
760             if (pkt->dts < max) {
761                 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
762                 av_log(s, loglevel, "Non-monotonous DTS in output stream "
763                        "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
764                        ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
766                     av_log(NULL, AV_LOG_FATAL, "aborting.\n");
769                 av_log(s, loglevel, "changing to %"PRId64". This may result "
770                        "in incorrect timestamps in the output file.\n",
772                 if (pkt->pts >= pkt->dts)
773                     pkt->pts = FFMAX(pkt->pts, max);
778     ost->last_mux_dts = pkt->dts;
780     ost->data_size += pkt->size;
781     ost->packets_written++;
783     pkt->stream_index = ost->index;
786         av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
787                 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
788                 av_get_media_type_string(ost->enc_ctx->codec_type),
789                 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
790                 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
/* A write failure finishes all output streams but does not abort: the main
 * loop can still flush and clean up; remember the failure for the exit code. */
795     ret = av_interleaved_write_frame(s, pkt);
797         print_error("av_interleaved_write_frame()", ret);
798         main_return_code = 1;
799         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
801     av_packet_unref(pkt);
/* Mark one output stream as finished encoding; the output file's recording
 * time is clamped to this stream's end so other streams stop with it. */
804 static void close_output_stream(OutputStream *ost)
806     OutputFile *of = output_files[ost->file_index];
808     ost->finished |= ENCODER_FINISHED;
/* Convert the stream position (in encoder time base) to AV_TIME_BASE units. */
810         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
811         of->recording_time = FFMIN(of->recording_time, end);
/* Route an encoded packet through the stream's bitstream-filter chain (if
 * any) and then to write_packet(). With no filters the packet goes straight
 * to the muxer. Consumes pkt. */
815 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
819     /* apply the output bitstream filters, if any */
820     if (ost->nb_bitstream_filters) {
/* Feed the packet into the first filter of the chain. */
823         ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
829             /* get a packet from the previous filter up the chain */
830             ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
831             if (ret == AVERROR(EAGAIN)) {
837             /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
838              * the api states this shouldn't happen after init(). Propagate it here to the
839              * muxer and to the next filters in the chain to workaround this.
840              * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
841              * par_out->extradata and adapt muxers accordingly to get rid of this. */
842             if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
843                 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
846                 ost->bsf_extradata_updated[idx - 1] |= 1;
849             /* send it to the next filter down the chain or to the muxer */
850             if (idx < ost->nb_bitstream_filters) {
851                 /* HACK/FIXME! - See above */
852                 if (!(ost->bsf_extradata_updated[idx] & 2)) {
853                     ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
856                     ost->bsf_extradata_updated[idx] |= 2;
858                 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
863                 write_packet(of, pkt, ost);
/* No bitstream filters configured: pass the packet through untouched. */
866         write_packet(of, pkt, ost);
/* EOF from a bsf is a normal drain condition, anything else is an error. */
869     if (ret < 0 && ret != AVERROR_EOF) {
870         av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
871                "packet for stream #%d:%d.\n", ost->file_index, ost->index);
/* Check whether the output file's -t recording limit has been reached for
 * this stream; if so, close the stream. NOTE(review): the elided tail
 * returns 0 when the limit was hit, nonzero otherwise. */
877 static int check_recording_time(OutputStream *ost)
879     OutputFile *of = output_files[ost->file_index];
881     if (of->recording_time != INT64_MAX &&
882         av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
883                       AV_TIME_BASE_Q) >= 0) {
884         close_output_stream(ost);
/* Encode one audio frame and mux all packets the encoder produces.
 * Keeps ost->sync_opts tracking the next expected sample position and
 * updates the encoded-samples/frames statistics. */
890 static void do_audio_out(OutputFile *of, OutputStream *ost,
893     AVCodecContext *enc = ost->enc_ctx;
897     av_init_packet(&pkt);
901     if (!check_recording_time(ost))
/* Without usable pts (or with forced resampling), stamp from our counter. */
904     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
905         frame->pts = ost->sync_opts;
906     ost->sync_opts = frame->pts + frame->nb_samples;
907     ost->samples_encoded += frame->nb_samples;
908     ost->frames_encoded++;
910     av_assert0(pkt.size || !pkt.data);
911     update_benchmark(NULL);
913         av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
914                "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
915                av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
916                enc->time_base.num, enc->time_base.den);
/* send/receive encode loop: one frame in, zero or more packets out. */
919     ret = avcodec_send_frame(enc, frame);
924         ret = avcodec_receive_packet(enc, &pkt);
925         if (ret == AVERROR(EAGAIN))
930         update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> muxing time base before handing off. */
932         av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
935             av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
936                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
937                    av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
938                    av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
941         output_packet(of, &pkt, ost);
946     av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* Encode one AVSubtitle and mux the resulting packet(s). DVB subtitles are
 * encoded twice: one packet to draw and one to clear. Timestamps are
 * shifted by the output start time and display-time offsets (ms). */
950 static void do_subtitle_out(OutputFile *of,
954     int subtitle_out_max_size = 1024 * 1024;
955     int subtitle_out_size, nb, i;
960     if (sub->pts == AV_NOPTS_VALUE) {
961         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer (freed in ffmpeg_cleanup). */
970         subtitle_out = av_malloc(subtitle_out_max_size);
972             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
977     /* Note: DVB subtitle need one packet to draw them and one other
978        packet to clear them */
979     /* XXX: signal it in the codec context ? */
980     if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
985     /* shift timestamp to honor -ss and make check_recording_time() work with -t */
987     if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
988         pts -= output_files[ost->file_index]->start_time;
989     for (i = 0; i < nb; i++) {
990         unsigned save_num_rects = sub->num_rects;
992         ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
993         if (!check_recording_time(ost))
997         // start_display_time is required to be 0
/* Fold start_display_time into pts, making the subtitle start immediately. */
998         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
999         sub->end_display_time  -= sub->start_display_time;
1000         sub->start_display_time = 0;
1004         ost->frames_encoded++;
1006         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1007                                                     subtitle_out_max_size, sub);
/* num_rects may have been temporarily changed for the clear packet; restore. */
1009         sub->num_rects = save_num_rects;
1010         if (subtitle_out_size < 0) {
1011             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1015         av_init_packet(&pkt);
1016         pkt.data = subtitle_out;
1017         pkt.size = subtitle_out_size;
1018         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1019         pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1020         if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1021             /* XXX: the pts correction is handled here. Maybe handling
1022                it in the codec would be better */
1024                 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1026                 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1029         output_packet(of, &pkt, ost);
/* Encode one filtered video frame (or flush with next_picture == NULL),
 * applying the selected -vsync policy: depending on the drift between the
 * input timestamp (sync_ipts) and the output position (ost->sync_opts),
 * the frame may be dropped, emitted once, or duplicated. Also handles
 * forced keyframes, interlacing flags and two-pass log output. */
1033 static void do_video_out(OutputFile *of,
1035                          AVFrame *next_picture,
1038     int ret, format_video_sync;
1040     AVCodecContext *enc = ost->enc_ctx;
1041     AVCodecParameters *mux_par = ost->st->codecpar;
1042     AVRational frame_rate;
1043     int nb_frames, nb0_frames, i;
1044     double delta, delta0;
1045     double duration = 0;
1047     InputStream *ist = NULL;
1048     AVFilterContext *filter = ost->filter->filter;
1050     if (ost->source_index >= 0)
1051         ist = input_streams[ost->source_index];
/* Estimate this frame's duration (in encoder ticks) from the best source:
 * filter frame rate, forced output rate, or the input packet duration. */
1053     frame_rate = av_buffersink_get_frame_rate(filter);
1054     if (frame_rate.num > 0 && frame_rate.den > 0)
1055         duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1057     if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1058         duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1060     if (!ost->filters_script &&
1064         lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1065         duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush: estimate how many times to repeat the last frame from history. */
1068     if (!next_picture) {
1070         nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1071                                           ost->last_nb0_frames[1],
1072                                           ost->last_nb0_frames[2]);
1074         delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1075         delta  = delta0 + duration;
1077         /* by default, we output a single frame */
1078         nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete sync mode based on the muxer. */
1081         format_video_sync = video_sync_method;
1082         if (format_video_sync == VSYNC_AUTO) {
1083             if(!strcmp(of->ctx->oformat->name, "avi")) {
1084                 format_video_sync = VSYNC_VFR;
1086                 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1088                 && format_video_sync == VSYNC_CFR
1089                 && input_files[ist->file_index]->ctx->nb_streams == 1
1090                 && input_files[ist->file_index]->input_ts_offset == 0) {
1091                 format_video_sync = VSYNC_VSCFR;
1093             if (format_video_sync == VSYNC_CFR && copy_ts) {
1094                 format_video_sync = VSYNC_VSCFR;
1097         ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Frames arriving (too far) in the past: warn or clip the timestamp. */
1101             format_video_sync != VSYNC_PASSTHROUGH &&
1102             format_video_sync != VSYNC_DROP) {
1103             if (delta0 < -0.6) {
1104                 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1106                 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1107             sync_ipts = ost->sync_opts;
/* Per-mode decision: how many frames to emit (nb_frames) and how many of
 * those repeat the previous picture (nb0_frames). */
1112         switch (format_video_sync) {
1114             if (ost->frame_number == 0 && delta0 >= 0.5) {
1115                 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1118                 ost->sync_opts = lrint(sync_ipts);
1121             // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1122             if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1124             } else if (delta < -1.1)
1126             else if (delta > 1.1) {
1127                 nb_frames = lrintf(delta);
1129                     nb0_frames = lrintf(delta0 - 0.6);
1135             else if (delta > 0.6)
1136                 ost->sync_opts = lrint(sync_ipts);
1139         case VSYNC_PASSTHROUGH:
1140             ost->sync_opts = lrint(sync_ipts);
/* Never exceed the -frames limit; record dup history for flush estimation. */
1147     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1148     nb0_frames = FFMIN(nb0_frames, nb_frames);
1150     memmove(ost->last_nb0_frames + 1,
1151             ost->last_nb0_frames,
1152             sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1153     ost->last_nb0_frames[0] = nb0_frames;
1155     if (nb0_frames == 0 && ost->last_dropped) {
1157         av_log(NULL, AV_LOG_VERBOSE,
1158                "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1159                ost->frame_number, ost->st->index, ost->last_frame->pts);
1161     if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
/* A huge duplication count usually indicates broken timestamps: bail out. */
1162         if (nb_frames > dts_error_threshold * 30) {
1163             av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1167         nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1168         av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1169         if (nb_frames_dup > dup_warning) {
1170             av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1174     ost->last_dropped = nb_frames == nb0_frames && next_picture;
1176     /* duplicates frame if needed */
1177     for (i = 0; i < nb_frames; i++) {
1178         AVFrame *in_picture;
1179         av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous picture. */
1183         if (i < nb0_frames && ost->last_frame) {
1184             in_picture = ost->last_frame;
1186             in_picture = next_picture;
1191         in_picture->pts = ost->sync_opts;
1194         if (!check_recording_time(ost))
1196         if (ost->frame_number >= ost->max_frames)
1200 #if FF_API_LAVF_FMT_RAWPICTURE
/* Legacy path: some muxers accept raw AVPicture structs without encoding. */
1201         if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1202             enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1203             /* raw pictures are written as AVPicture structure to
1204                avoid any copies. We support temporarily the older
1206             if (in_picture->interlaced_frame)
1207                 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1209                 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1210             pkt.data   = (uint8_t *)in_picture;
1211             pkt.size   =  sizeof(AVPicture);
1212             pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1213             pkt.flags |= AV_PKT_FLAG_KEY;
1215             output_packet(of, &pkt, ost);
1219             int forced_keyframe = 0;
1222             if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1223                 ost->top_field_first >= 0)
1224                 in_picture->top_field_first = !!ost->top_field_first;
1226             if (in_picture->interlaced_frame) {
1227                 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1228                     mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1230                     mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1232                 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1234             in_picture->quality = enc->global_quality;
1235             in_picture->pict_type = 0;
/* Forced keyframe selection: explicit pts list, expression, or "source". */
1237             pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1238                 in_picture->pts * av_q2d(enc->time_base) : NAN;
1239             if (ost->forced_kf_index < ost->forced_kf_count &&
1240                 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1241                 ost->forced_kf_index++;
1242                 forced_keyframe = 1;
1243             } else if (ost->forced_keyframes_pexpr) {
1245                 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1246                 res = av_expr_eval(ost->forced_keyframes_pexpr,
1247                                    ost->forced_keyframes_expr_const_values, NULL);
1248                 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1249                         ost->forced_keyframes_expr_const_values[FKF_N],
1250                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1251                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1252                         ost->forced_keyframes_expr_const_values[FKF_T],
1253                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1256                     forced_keyframe = 1;
1257                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1258                         ost->forced_keyframes_expr_const_values[FKF_N];
1259                     ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1260                         ost->forced_keyframes_expr_const_values[FKF_T];
1261                     ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1264                 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1265             } else if (   ost->forced_keyframes
1266                        && !strncmp(ost->forced_keyframes, "source", 6)
1267                        && in_picture->key_frame==1) {
1268                 forced_keyframe = 1;
1271             if (forced_keyframe) {
1272                 in_picture->pict_type = AV_PICTURE_TYPE_I;
1273                 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276             update_benchmark(NULL);
1278                 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1279                        "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1280                        av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1281                        enc->time_base.num, enc->time_base.den);
1284             ost->frames_encoded++;
/* send/receive encode loop: one frame in, zero or more packets out. */
1286             ret = avcodec_send_frame(enc, in_picture);
1291                 ret = avcodec_receive_packet(enc, &pkt);
1292                 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1293                 if (ret == AVERROR(EAGAIN))
1299                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1300                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1301                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1302                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1305                 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1306                     pkt.pts = ost->sync_opts;
1308                 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1311                     av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1312                            "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1313                            av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1314                            av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1317                 frame_size = pkt.size;
1318                 output_packet(of, &pkt, ost);
1320                 /* if two pass, output log */
1321                 if (ost->logfile && enc->stats_out) {
1322                     fprintf(ost->logfile, "%s", enc->stats_out);
1328          * For video, number of frames in == number of packets out.
1329          * But there may be reordering, so we can't throw away frames on encoder
1330          * flush, we need to limit them here, before they go into encoder.
1332         ost->frame_number++;
1334         if (vstats_filename && frame_size)
1335             do_video_stats(ost, frame_size);
/* Keep a reference to this picture so it can be re-encoded as a duplicate. */
1338     if (!ost->last_frame)
1339         ost->last_frame = av_frame_alloc();
1340     av_frame_unref(ost->last_frame);
1341     if (next_picture && ost->last_frame)
1342         av_frame_ref(ost->last_frame, next_picture);
1344         av_frame_free(&ost->last_frame);
1348     av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/*
 * Convert a normalized squared-error ratio into decibels (PSNR).
 * Callers pass error/scale, i.e. accumulated SSE divided by
 * width*height*255^2 (see do_video_stats / print_report).
 * NOTE(review): extract elides the function's braces.
 */
1352 static double psnr(double d)
1354 return -10.0 * log10(d);
/*
 * Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, frame size, time and bitrates) to the -vstats file.
 * The file is opened lazily on first call.
 * NOTE(review): this extract elides several lines (declarations, braces);
 * the comments below describe only the visible code.
 */
1357 static void do_video_stats(OutputStream *ost, int frame_size)
1359 AVCodecContext *enc;
1361 double ti1, bitrate, avg_bitrate;
1363 /* this is executed just the first time do_video_stats is called */
1365 vstats_file = fopen(vstats_filename, "w");
1373 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1374 frame_number = ost->st->nb_frames;
/* vstats_version 2 additionally prints the output file/stream indices */
1375 if (vstats_version <= 1) {
1376 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1377 ost->quality / (float)FF_QP2LAMBDA);
1379 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1380 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR derived from accumulated SSE normalized by picture area * 255^2 */
1383 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1384 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1386 fprintf(vstats_file,"f_size= %6d ", frame_size);
1387 /* compute pts value */
1388 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* instantaneous bitrate assumes one frame per encoder time_base tick */
1392 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1393 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1394 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1395 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1396 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Forward declaration; used by reap_filters() and flush_encoders() below. */
1400 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark this stream — and every stream of its output file — as finished
 * for both the encoder and the muxer, so no more data is produced.
 * NOTE(review): the elided line before the loop presumably gates it on
 * a per-file condition (e.g. of->shortest) — TODO confirm.
 */
1402 static void finish_output_stream(OutputStream *ost)
1404 OutputFile *of = output_files[ost->file_index];
1407 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1410 for (i = 0; i < of->ctx->nb_streams; i++)
1411 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/* NOTE(review): the extract elides lines here, including the comment
 * delimiters that originally surrounded the next two doc lines. */
1416 * Get and encode new output from any of the filtergraphs, without causing
1419 * @return 0 for success, <0 for severe errors
1421 static int reap_filters(int flush)
1423 AVFrame *filtered_frame = NULL;
1426 /* Reap all buffers present in the buffer sinks */
1427 for (i = 0; i < nb_output_streams; i++) {
1428 OutputStream *ost = output_streams[i];
1429 OutputFile *of = output_files[ost->file_index];
1430 AVFilterContext *filter;
1431 AVCodecContext *enc = ost->enc_ctx;
/* nothing to reap for streams whose filtergraph is not configured yet */
1434 if (!ost->filter || !ost->filter->graph->graph)
1436 filter = ost->filter->filter;
/* lazily initialize the output stream (open encoder etc.) on first frame */
1438 if (!ost->initialized) {
1439 char error[1024] = "";
1440 ret = init_output_stream(ost, error, sizeof(error));
1442 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1443 ost->file_index, ost->index, error);
/* reusable per-stream frame, allocated once */
1448 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1449 return AVERROR(ENOMEM);
1451 filtered_frame = ost->filtered_frame;
1454 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames the sink already has; never pull upstream */
1455 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1456 AV_BUFFERSINK_FLAG_NO_REQUEST);
1458 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1459 av_log(NULL, AV_LOG_WARNING,
1460 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* on flush, EOF from a video sink still calls do_video_out() with NULL
 * so the encoder can be drained */
1461 } else if (flush && ret == AVERROR_EOF) {
1462 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1463 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1467 if (ost->finished) {
1468 av_frame_unref(filtered_frame);
/* rescale frame pts from the filter time base into the encoder time base;
 * extra_bits temporarily widens tb.den so float_pts keeps sub-tick
 * precision before being divided back down */
1471 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1472 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1473 AVRational filter_tb = av_buffersink_get_time_base(filter);
1474 AVRational tb = enc->time_base;
1475 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1477 tb.den <<= extra_bits;
1479 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1480 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1481 float_pts /= 1 << extra_bits;
1482 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1483 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1485 filtered_frame->pts =
1486 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1487 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1489 //if (ost->source_index >= 0)
1490 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
/* dispatch the frame to the per-media encode helper */
1492 switch (av_buffersink_get_type(filter)) {
1493 case AVMEDIA_TYPE_VIDEO:
1494 if (!ost->frame_aspect_ratio.num)
1495 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1498 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1499 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501 enc->time_base.num, enc->time_base.den);
1504 do_video_out(of, ost, filtered_frame, float_pts);
1506 case AVMEDIA_TYPE_AUDIO:
/* encoders cannot follow channel-count changes mid-stream unless they
 * advertise AV_CODEC_CAP_PARAM_CHANGE */
1507 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1508 enc->channels != av_frame_get_channels(filtered_frame)) {
1509 av_log(NULL, AV_LOG_ERROR,
1510 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1513 do_audio_out(of, ost, filtered_frame);
1516 // TODO support subtitle filters
1520 av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: total bytes per media type, muxing
 * overhead percentage, then (at verbose level) per-input and per-output
 * stream packet/frame/byte counts.
 * NOTE(review): extract elides some lines (loop variable declarations,
 * braces, else branches); comments describe only the visible code.
 */
1527 static void print_final_stats(int64_t total_size)
1529 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1530 uint64_t subtitle_size = 0;
1531 uint64_t data_size = 0;
/* percent stays -1.0 ("unknown") unless both sizes are usable below */
1532 float percent = -1.0;
1536 for (i = 0; i < nb_output_streams; i++) {
1537 OutputStream *ost = output_streams[i];
1538 switch (ost->enc_ctx->codec_type) {
1539 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1540 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1541 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1542 default: other_size += ost->data_size; break;
1544 extra_size += ost->enc_ctx->extradata_size;
1545 data_size += ost->data_size;
/* pass-1-only streams are excluded from the overhead computation */
1546 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1547 != AV_CODEC_FLAG_PASS1)
1551 if (data_size && total_size>0 && total_size >= data_size)
1552 percent = 100.0 * (total_size - data_size) / data_size;
1554 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1555 video_size / 1024.0,
1556 audio_size / 1024.0,
1557 subtitle_size / 1024.0,
1558 other_size / 1024.0,
1559 extra_size / 1024.0);
1561 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563 av_log(NULL, AV_LOG_INFO, "unknown");
1564 av_log(NULL, AV_LOG_INFO, "\n");
1566 /* print verbose per-stream stats */
1567 for (i = 0; i < nb_input_files; i++) {
1568 InputFile *f = input_files[i];
/* note: this shadows the total_size parameter for per-file totals */
1569 uint64_t total_packets = 0, total_size = 0;
1571 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1572 i, f->ctx->filename);
1574 for (j = 0; j < f->nb_streams; j++) {
1575 InputStream *ist = input_streams[f->ist_index + j];
1576 enum AVMediaType type = ist->dec_ctx->codec_type;
1578 total_size += ist->data_size;
1579 total_packets += ist->nb_packets;
1581 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1582 i, j, media_type_string(type));
1583 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1584 ist->nb_packets, ist->data_size);
1586 if (ist->decoding_needed) {
1587 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1588 ist->frames_decoded);
1589 if (type == AVMEDIA_TYPE_AUDIO)
1590 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1591 av_log(NULL, AV_LOG_VERBOSE, "; ");
1594 av_log(NULL, AV_LOG_VERBOSE, "\n");
1597 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1598 total_packets, total_size);
1601 for (i = 0; i < nb_output_files; i++) {
1602 OutputFile *of = output_files[i];
1603 uint64_t total_packets = 0, total_size = 0;
1605 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1606 i, of->ctx->filename);
1608 for (j = 0; j < of->ctx->nb_streams; j++) {
1609 OutputStream *ost = output_streams[of->ost_index + j];
1610 enum AVMediaType type = ost->enc_ctx->codec_type;
1612 total_size += ost->data_size;
1613 total_packets += ost->packets_written;
1615 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1616 i, j, media_type_string(type));
1617 if (ost->encoding_needed) {
1618 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1619 ost->frames_encoded);
1620 if (type == AVMEDIA_TYPE_AUDIO)
1621 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1622 av_log(NULL, AV_LOG_VERBOSE, "; ");
1625 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1626 ost->packets_written, ost->data_size);
1628 av_log(NULL, AV_LOG_VERBOSE, "\n");
1631 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1632 total_packets, total_size);
/* warn the user when nothing at all was encoded */
1634 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1635 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637 av_log(NULL, AV_LOG_WARNING, "\n");
1639 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when -progress is active, a machine-readable key=value
 * version to progress_avio. Rate-limited to one update per 500 ms unless
 * this is the final report.
 * NOTE(review): extract elides many lines (declarations of buf/t/fps/q,
 * braces, else branches); comments describe only the visible code.
 */
1644 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1647 AVBPrint buf_script;
1649 AVFormatContext *oc;
1651 AVCodecContext *enc;
1652 int frame_number, vid, i;
1655 int64_t pts = INT64_MIN + 1;
/* static state: timestamp of the previous report, QP histogram for -qphist */
1656 static int64_t last_time = -1;
1657 static int qp_histogram[52];
1658 int hours, mins, secs, us;
1662 if (!print_stats && !is_last_report && !progress_avio)
/* throttle: skip unless >= 500 ms elapsed since the last report */
1665 if (!is_last_report) {
1666 if (last_time == -1) {
1667 last_time = cur_time;
1670 if ((cur_time - last_time) < 500000)
1672 last_time = cur_time;
1675 t = (cur_time-timer_start) / 1000000.0;
1678 oc = output_files[0]->ctx;
1680 total_size = avio_size(oc->pb);
1681 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1682 total_size = avio_tell(oc->pb);
1686 av_bprint_init(&buf_script, 0, 1);
1687 for (i = 0; i < nb_output_streams; i++) {
1689 ost = output_streams[i];
1691 if (!ost->stream_copy)
1692 q = ost->quality / (float) FF_QP2LAMBDA;
/* only q is appended for video streams after the first one (vid set) */
1694 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1696 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1697 ost->file_index, ost->index, q);
/* first video stream drives the frame/fps/q columns */
1699 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1702 frame_number = ost->frame_number;
1703 fps = t > 1 ? frame_number / t : 0;
1704 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1705 frame_number, fps < 9.95, fps, q);
1706 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1707 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1708 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1709 ost->file_index, ost->index, q);
/* "L" marks the last report */
1711 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-scaled histogram of quantizers seen so far */
1715 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717 for (j = 0; j < 32; j++)
1718 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
/* per-plane PSNR from the encoder's accumulated error */
1721 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723 double error, error_sum = 0;
1724 double scale, scale_sum = 0;
1726 char type[3] = { 'Y','U','V' };
1727 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1728 for (j = 0; j < 3; j++) {
1729 if (is_last_report) {
1730 error = enc->error[j];
1731 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733 error = ost->error[j];
1734 scale = enc->width * enc->height * 255.0 * 255.0;
1740 p = psnr(error / scale);
1741 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1742 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1743 ost->file_index, ost->index, type[j] | 32, p);
1745 p = psnr(error_sum / scale_sum);
1746 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1747 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1748 ost->file_index, ost->index, p);
1752 /* compute min output value */
1753 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1754 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1755 ost->st->time_base, AV_TIME_BASE_Q));
1757 nb_frames_drop += ost->last_dropped;
/* split pts (in AV_TIME_BASE units) into h:m:s.us for display */
1760 secs = FFABS(pts) / AV_TIME_BASE;
1761 us = FFABS(pts) % AV_TIME_BASE;
1767 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1768 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1770 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1772 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773 "size=%8.0fkB time=", total_size / 1024.0);
/* "-" prefix for negative timestamps */
1775 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1777 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1778 (100 * us) / AV_TIME_BASE);
1781 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1782 av_bprintf(&buf_script, "bitrate=N/A\n");
1784 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1785 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1788 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1789 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1790 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1791 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1792 hours, mins, secs, us);
1794 if (nb_frames_dup || nb_frames_drop)
1795 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1796 nb_frames_dup, nb_frames_drop);
1797 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1798 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1801 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1802 av_bprintf(&buf_script, "speed=N/A\n");
1804 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1805 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps updating the same console line until the final report */
1808 if (print_stats || is_last_report) {
1809 const char end = is_last_report ? '\n' : '\r';
1810 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1811 fprintf(stderr, "%s %c", buf, end);
1813 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
/* machine-readable -progress output */
1818 if (progress_avio) {
1819 av_bprintf(&buf_script, "progress=%s\n",
1820 is_last_report ? "end" : "continue");
1821 avio_write(progress_avio, buf_script.str,
1822 FFMIN(buf_script.len, buf_script.size - 1));
1823 avio_flush(progress_avio);
1824 av_bprint_finalize(&buf_script, NULL);
1825 if (is_last_report) {
1826 if ((ret = avio_closep(&progress_avio)) < 0)
1827 av_log(NULL, AV_LOG_ERROR,
1828 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1833 print_final_stats(total_size);
/*
 * Drain every encoder at end of input: send a NULL frame, then receive
 * and mux packets until AVERROR_EOF. Streams whose filtergraph was never
 * configured get fake input formats taken from libavformat parameters so
 * the encoder can still be initialized (or are finished outright).
 * NOTE(review): extract elides lines (continue/break statements, braces,
 * receive loop header); comments describe only the visible code.
 */
1836 static void flush_encoders(void)
1840 for (i = 0; i < nb_output_streams; i++) {
1841 OutputStream *ost = output_streams[i];
1842 AVCodecContext *enc = ost->enc_ctx;
1843 OutputFile *of = output_files[ost->file_index];
1845 if (!ost->encoding_needed)
1848 // Try to enable encoding with no input frames.
1849 // Maybe we should just let encoding fail instead.
1850 if (!ost->initialized) {
1851 FilterGraph *fg = ost->filter->graph;
1852 char error[1024] = "";
1854 av_log(NULL, AV_LOG_WARNING,
1855 "Finishing stream %d:%d without any data written to it.\n",
1856 ost->file_index, ost->st->index);
1858 if (ost->filter && !fg->graph) {
1860 for (x = 0; x < fg->nb_inputs; x++) {
1861 InputFilter *ifilter = fg->inputs[x];
1862 if (ifilter->format < 0) {
1863 AVCodecParameters *par = ifilter->ist->st->codecpar;
1864 // We never got any input. Set a fake format, which will
1865 // come from libavformat.
1866 ifilter->format = par->format;
1867 ifilter->sample_rate = par->sample_rate;
1868 ifilter->channels = par->channels;
1869 ifilter->channel_layout = par->channel_layout;
1870 ifilter->width = par->width;
1871 ifilter->height = par->height;
1872 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1876 if (!ifilter_has_all_input_formats(fg))
1879 ret = configure_filtergraph(fg);
1881 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1885 finish_output_stream(ost);
1888 ret = init_output_stream(ost, error, sizeof(error));
1890 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891 ost->file_index, ost->index, error);
/* raw/PCM-style audio (frame_size <= 1) needs no draining */
1896 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1898 #if FF_API_LAVF_FMT_RAWPICTURE
1899 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1903 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
/* NULL frame puts the encoder in draining mode */
1906 avcodec_send_frame(enc, NULL);
1909 const char *desc = NULL;
1913 switch (enc->codec_type) {
1914 case AVMEDIA_TYPE_AUDIO:
1917 case AVMEDIA_TYPE_VIDEO:
1924 av_init_packet(&pkt);
1928 update_benchmark(NULL);
1929 ret = avcodec_receive_packet(enc, &pkt);
1930 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1931 if (ret < 0 && ret != AVERROR_EOF) {
1932 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* two-pass: append encoder stats to the log file */
1937 if (ost->logfile && enc->stats_out) {
1938 fprintf(ost->logfile, "%s", enc->stats_out);
1940 if (ret == AVERROR_EOF) {
1943 if (ost->finished & MUXER_FINISHED) {
1944 av_packet_unref(&pkt);
1947 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1948 pkt_size = pkt.size;
1949 output_packet(of, &pkt, ost);
1950 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1951 do_video_stats(ost, pkt_size);
/* NOTE(review): the comment delimiters around the next line are elided. */
1958 * Check whether a packet from ist should be written into ost at this time
1960 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1962 OutputFile *of = output_files[ost->file_index];
1963 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
/* reject packets from streams this output does not map from */
1965 if (ost->source_index != ist_index)
/* reject packets before the output file's requested start time */
1971 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Stream-copy path (-c copy): forward a demuxed packet to the muxer
 * without decoding. Rescales pts/dts/duration into the muxer time base,
 * honours -ss/-t trimming, and runs the legacy parser-change / raw
 * picture fixups for a few codecs.
 * NOTE(review): extract elides lines (returns, braces, else branches,
 * the pict declaration); comments describe only the visible code.
 */
1977 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1979 OutputFile *of = output_files[ost->file_index];
1980 InputFile *f = input_files [ist->file_index];
1981 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1982 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1986 av_init_packet(&opkt);
/* by default drop non-key packets until the first keyframe is copied */
1988 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1989 !ost->copy_initial_nonkeyframes)
/* drop packets before the requested start unless -copypriorss is set */
1992 if (!ost->frame_number && !ost->copy_prior_start) {
1993 int64_t comp_start = start_time;
1994 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1995 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1996 if (pkt->pts == AV_NOPTS_VALUE ?
1997 ist->pts < comp_start :
1998 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* output-file -t reached: stop this stream */
2002 if (of->recording_time != INT64_MAX &&
2003 ist->pts >= of->recording_time + start_time) {
2004 close_output_stream(ost);
/* input-file -t reached: likewise */
2008 if (f->recording_time != INT64_MAX) {
2009 start_time = f->ctx->start_time;
2010 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2011 start_time += f->start_time;
2012 if (ist->pts >= f->recording_time + start_time) {
2013 close_output_stream(ost);
2018 /* force the input stream PTS */
2019 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
/* rescale timestamps from the input stream tb to the muxer tb, shifted
 * by the output start time */
2022 if (pkt->pts != AV_NOPTS_VALUE)
2023 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2025 opkt.pts = AV_NOPTS_VALUE;
2027 if (pkt->dts == AV_NOPTS_VALUE)
2028 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2030 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2031 opkt.dts -= ost_tb_start_time;
/* audio: re-derive dts from sample counts to keep it sample-accurate */
2033 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2034 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2036 duration = ist->dec_ctx->frame_size;
2037 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2038 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2039 ost->mux_timebase) - ost_tb_start_time;
2042 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2044 opkt.flags = pkt->flags;
2045 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2046 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2047 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2048 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2049 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2051 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2052 &opkt.data, &opkt.size,
2053 pkt->data, pkt->size,
2054 pkt->flags & AV_PKT_FLAG_KEY);
2056 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* wrap parser-allocated data so the packet owns and frees it */
2061 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2066 opkt.data = pkt->data;
2067 opkt.size = pkt->size;
2069 av_copy_packet_side_data(&opkt, pkt);
2071 #if FF_API_LAVF_FMT_RAWPICTURE
2072 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2073 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2074 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2075 /* store AVPicture in AVPacket, as expected by the output format */
2076 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2078 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2082 opkt.data = (uint8_t *)&pict;
2083 opkt.size = sizeof(AVPicture);
2084 opkt.flags |= AV_PKT_FLAG_KEY;
2088 output_packet(of, &opkt, ost);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count (bounded by -guess_layout_max) and warn the user.
 * NOTE(review): extract elides the return statements and braces.
 */
2091 int guess_input_channel_layout(InputStream *ist)
2093 AVCodecContext *dec = ist->dec_ctx;
2095 if (!dec->channel_layout) {
2096 char layout_name[256];
/* give up when the channel count exceeds the user-set guessing limit */
2098 if (dec->channels > ist->guess_layout_max)
2100 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2101 if (!dec->channel_layout)
2103 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2104 dec->channels, dec->channel_layout);
2105 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2106 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keep decode outcomes: count successes/failures in
 * decode_error_stat, and with -xerror abort on a decode error or on a
 * corrupt decoded frame.
 * NOTE(review): extract elides braces and the exit call(s).
 */
2111 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures */
2113 if (*got_output || ret<0)
2114 decode_error_stat[ret<0] ++;
2116 if (ret < 0 && exit_on_error)
2119 if (exit_on_error && *got_output && ist) {
2120 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2121 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2127 // Filters can be configured only if the formats of all inputs are known.
/* Returns whether every audio/video input of the graph has a known format.
 * NOTE(review): extract elides the return statements and braces. */
2128 static int ifilter_has_all_input_formats(FilterGraph *fg)
2131 for (i = 0; i < fg->nb_inputs; i++) {
2132 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2133 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into an input filter. Detects parameter changes
 * (format, hw frames context, audio rate/channels/layout, video size)
 * that require reconfiguring the filtergraph; frames arriving before all
 * graph inputs have known formats are cloned and queued in a FIFO.
 * NOTE(review): extract elides lines (breaks, braces, return paths);
 * comments describe only the visible code.
 */
2139 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2141 FilterGraph *fg = ifilter->graph;
2142 int need_reinit, ret, i;
2144 /* determine if the parameters for this input changed */
2145 need_reinit = ifilter->format != frame->format;
/* hw frames context appearing/disappearing or changing also forces reinit */
2146 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2147 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2150 switch (ifilter->ist->st->codecpar->codec_type) {
2151 case AVMEDIA_TYPE_AUDIO:
2152 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2153 ifilter->channels != frame->channels ||
2154 ifilter->channel_layout != frame->channel_layout;
2156 case AVMEDIA_TYPE_VIDEO:
2157 need_reinit |= ifilter->width != frame->width ||
2158 ifilter->height != frame->height;
2163 ret = ifilter_parameters_from_frame(ifilter, frame);
2168 /* (re)init the graph if possible, otherwise buffer the frame and return */
2169 if (need_reinit || !fg->graph) {
2170 for (i = 0; i < fg->nb_inputs; i++) {
/* not all inputs known yet: clone the frame into this input's FIFO */
2171 if (!ifilter_has_all_input_formats(fg)) {
2172 AVFrame *tmp = av_frame_clone(frame);
2174 return AVERROR(ENOMEM);
2175 av_frame_unref(frame);
/* grow the FIFO when full */
2177 if (!av_fifo_space(ifilter->frame_queue)) {
2178 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2180 av_frame_free(&tmp);
2184 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
/* flush pending output of the old graph before reconfiguring */
2189 ret = reap_filters(1);
2190 if (ret < 0 && ret != AVERROR_EOF) {
2192 av_strerror(ret, errbuf, sizeof(errbuf));
2194 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2198 ret = configure_filtergraph(fg);
2200 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
/* PUSH so the frame propagates through the graph immediately */
2205 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2207 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
/*
 * Signal EOF on one filtergraph input. If the graph was configured, push
 * a NULL frame; otherwise, once every input has reached EOF without the
 * graph ever being configured, mark all its output streams finished.
 * NOTE(review): extract elides declarations, braces and return paths.
 */
2214 static int ifilter_send_eof(InputFilter *ifilter)
2220 if (ifilter->filter) {
/* NULL frame = EOF marker for the buffer source */
2221 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2225 // the filtergraph was never configured
2226 FilterGraph *fg = ifilter->graph;
2227 for (i = 0; i < fg->nb_inputs; i++)
2228 if (!fg->inputs[i]->eof)
2230 if (i == fg->nb_inputs) {
2231 // All the input streams have finished without the filtergraph
2232 // ever being configured.
2233 // Mark the output streams as finished.
2234 for (j = 0; j < fg->nb_outputs; j++)
2235 finish_output_stream(fg->outputs[j]->ost);
2242 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2243 // There is the following difference: if you got a frame, you must call
2244 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2245 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
/* Thin shim mapping the old got_frame decode convention onto the
 * send_packet/receive_frame API.
 * NOTE(review): extract elides the *got_frame assignment and returns. */
2246 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2253 ret = avcodec_send_packet(avctx, pkt);
2254 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2255 // decoded frames with avcodec_receive_frame() until done.
2256 if (ret < 0 && ret != AVERROR_EOF)
2260 ret = avcodec_receive_frame(avctx, frame);
2261 if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute a decoded frame to every filter fed by this input stream.
 * All but the last filter get a new reference (via ist->filter_frame);
 * the last one consumes the decoded frame itself, avoiding one ref.
 * NOTE(review): extract elides the else branch and the final return.
 */
2269 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2274 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2275 for (i = 0; i < ist->nb_filters; i++) {
2276 if (i < ist->nb_filters - 1) {
2277 f = ist->filter_frame;
2278 ret = av_frame_ref(f, decoded_frame);
2283 ret = ifilter_send_frame(ist->filters[i], f);
/* EOF from a filter input is not an error for the caller */
2284 if (ret == AVERROR_EOF)
2285 ret = 0; /* ignore */
2287 av_log(NULL, AV_LOG_ERROR,
2288 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, validate the sample rate, advance the input
 * stream's predicted pts/dts by the decoded sample count, choose a pts
 * source (frame pts, packet pts, or stream dts) and rescale it to the
 * sample-rate time base, then forward the frame to the filters.
 * NOTE(review): extract elides lines (declarations of ret/err, goto/return
 * paths, braces); comments describe only the visible code.
 */
2295 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2298 AVFrame *decoded_frame;
2299 AVCodecContext *avctx = ist->dec_ctx;
2301 AVRational decoded_frame_tb;
/* reusable per-stream frames, allocated on first use */
2303 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2304 return AVERROR(ENOMEM);
2305 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2306 return AVERROR(ENOMEM);
2307 decoded_frame = ist->decoded_frame;
2309 update_benchmark(NULL);
2310 ret = decode(avctx, decoded_frame, got_output, pkt);
2311 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* a decoder reporting a non-positive sample rate is broken input */
2315 if (ret >= 0 && avctx->sample_rate <= 0) {
2316 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2317 ret = AVERROR_INVALIDDATA;
2320 if (ret != AVERROR_EOF)
2321 check_decode_result(ist, got_output, ret);
2323 if (!*got_output || ret < 0)
2326 ist->samples_decoded += decoded_frame->nb_samples;
2327 ist->frames_decoded++;
2330 /* increment next_dts to use for the case where the input stream does not
2331 have timestamps or there are multiple frames in the packet */
2332 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2334 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* pts source priority: frame pts > packet pts > stream dts estimate */
2338 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2339 decoded_frame_tb = ist->st->time_base;
2340 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2341 decoded_frame->pts = pkt->pts;
2342 decoded_frame_tb = ist->st->time_base;
2344 decoded_frame->pts = ist->dts;
2345 decoded_frame_tb = AV_TIME_BASE_Q;
/* rescale into 1/sample_rate units, keeping sub-unit error across calls */
2347 if (decoded_frame->pts != AV_NOPTS_VALUE)
2348 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2349 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2350 (AVRational){1, avctx->sample_rate});
2351 ist->nb_samples = decoded_frame->nb_samples;
2352 err = send_frame_to_filters(ist, decoded_frame);
2354 av_frame_unref(ist->filter_frame);
2355 av_frame_unref(decoded_frame);
2356 return err < 0 ? err : ret;
// Decode one video packet for input stream 'ist' (pkt == NULL drains the
// decoder at EOF) and forward any produced frame to the attached filters.
// *got_output is set when a frame came out; returns <0 on decode/filter error.
// NOTE(review): several interior lines are not visible in this listing.
2359 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
2362 AVFrame *decoded_frame;
2363 int i, ret = 0, err = 0;
2364 int64_t best_effort_timestamp;
2365 int64_t dts = AV_NOPTS_VALUE;
2368 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2369 // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2371 if (!eof && pkt && pkt->size == 0)
// Lazily allocate the per-stream reusable frames.
2374 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2375 return AVERROR(ENOMEM);
2376 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2377 return AVERROR(ENOMEM);
2378 decoded_frame = ist->decoded_frame;
// Rescale the stream-global dts (AV_TIME_BASE_Q) into this stream's time base.
2379 if (ist->dts != AV_NOPTS_VALUE)
2380 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2383 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2386 // The old code used to set dts on the drain packet, which does not work
2387 // with the new API anymore.
// Queue the dts so it can serve as a best-effort timestamp while draining.
2389 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2391 return AVERROR(ENOMEM);
2392 ist->dts_buffer = new;
2393 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2396 update_benchmark(NULL);
2397 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2398 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2402 // The following line may be required in some cases where there is no parser
2403 // or the parser does not has_b_frames correctly
2404 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2405 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2406 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2408 av_log(ist->dec_ctx, AV_LOG_WARNING,
2409 "video_delay is larger in decoder than demuxer %d > %d.\n"
2410 "If you want to help, upload a sample "
2411 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2412 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2413 ist->dec_ctx->has_b_frames,
2414 ist->st->codecpar->video_delay);
2417 if (ret != AVERROR_EOF)
2418 check_decode_result(ist, got_output, ret);
// Warn (debug level) if the decoder context and the frame disagree on geometry.
2420 if (*got_output && ret >= 0) {
2421 if (ist->dec_ctx->width != decoded_frame->width ||
2422 ist->dec_ctx->height != decoded_frame->height ||
2423 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2424 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2425 decoded_frame->width,
2426 decoded_frame->height,
2427 decoded_frame->format,
2428 ist->dec_ctx->width,
2429 ist->dec_ctx->height,
2430 ist->dec_ctx->pix_fmt);
2434 if (!*got_output || ret < 0)
2437 if(ist->top_field_first>=0)
2438 decoded_frame->top_field_first = ist->top_field_first;
2440 ist->frames_decoded++;
// Download the frame from the hwaccel surface into a software frame if needed.
2442 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2443 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2447 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2449 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
// Forced CFR (-r on input): synthesize monotonically increasing timestamps.
2451 if (ist->framerate.num)
2452 best_effort_timestamp = ist->cfr_next_pts++;
// While draining, fall back to the oldest queued packet dts (FIFO pop).
2454 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2455 best_effort_timestamp = ist->dts_buffer[0];
2457 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2458 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2459 ist->nb_dts_buffer--;
2462 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2463 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2465 if (ts != AV_NOPTS_VALUE)
2466 ist->next_pts = ist->pts = ts;
2470 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2471 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2472 ist->st->index, av_ts2str(decoded_frame->pts),
2473 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2474 best_effort_timestamp,
2475 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2476 decoded_frame->key_frame, decoded_frame->pict_type,
2477 ist->st->time_base.num, ist->st->time_base.den);
// Container-level SAR overrides the codec-level one.
2480 if (ist->st->sample_aspect_ratio.num)
2481 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2483 err = send_frame_to_filters(ist, decoded_frame);
2486 av_frame_unref(ist->filter_frame);
2487 av_frame_unref(decoded_frame);
2488 return err < 0 ? err : ret;
// Decode one subtitle packet and dispatch the result: update/queue sub2video
// frames for filtering and/or re-encode it to every eligible subtitle output.
// NOTE(review): several interior lines are not visible in this listing.
2491 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2494 AVSubtitle subtitle;
2496 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2497 &subtitle, got_output, pkt);
2499 check_decode_result(NULL, got_output, ret);
2501 if (ret < 0 || !*got_output) {
2504 sub2video_flush(ist);
// -fix_sub_duration: delay each subtitle by one, clamping its display time
// to the pts of the following subtitle.
2508 if (ist->fix_sub_duration) {
2510 if (ist->prev_sub.got_output) {
// Duration in milliseconds between the previous subtitle's pts and this one's.
2511 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2512 1000, AV_TIME_BASE);
2513 if (end < ist->prev_sub.subtitle.end_display_time) {
2514 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2515 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2516 ist->prev_sub.subtitle.end_display_time, end,
2517 end <= 0 ? ", dropping it" : "");
2518 ist->prev_sub.subtitle.end_display_time = end;
// Swap the current decode result with the buffered previous one so the
// previous (now duration-fixed) subtitle is the one processed below.
2521 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2522 FFSWAP(int, ret, ist->prev_sub.ret);
2523 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2531 if (ist->sub2video.frame) {
2532 sub2video_update(ist, &subtitle);
2533 } else if (ist->nb_filters) {
// No sub2video frame yet: queue the subtitle until the filtergraph exists.
2534 if (!ist->sub2video.sub_queue)
2535 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2536 if (!ist->sub2video.sub_queue)
2538 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2539 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2543 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2547 if (!subtitle.num_rects)
2550 ist->frames_decoded++;
// Re-encode the subtitle to every output stream fed by this input.
2552 for (i = 0; i < nb_output_streams; i++) {
2553 OutputStream *ost = output_streams[i];
2555 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2556 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2559 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2564 avsubtitle_free(&subtitle);
// Signal EOF to every filtergraph input attached to this input stream so the
// filters can flush any data they have buffered.
2568 static int send_filter_eof(InputStream *ist)
2571 for (i = 0; i < ist->nb_filters; i++) {
2572 ret = ifilter_send_eof(ist->filters[i]);
2579 /* pkt = NULL means EOF (needed to flush decoder buffers) */
// Main per-packet dispatcher: maintains the stream's pts/dts bookkeeping,
// runs the appropriate decoder (audio/video/subtitle) when decoding is
// needed, sends filter EOF on drain, and handles the streamcopy path.
// Returns !eof_reached, i.e. non-zero while there is more to process.
// NOTE(review): several interior lines are not visible in this listing.
2580 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2584 int eof_reached = 0;
// First timestamp: seed ist->dts (offset by decoder delay for video) and pts.
2587 if (!ist->saw_first_ts) {
2588 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2590 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2591 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2592 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2594 ist->saw_first_ts = 1;
2597 if (ist->next_dts == AV_NOPTS_VALUE)
2598 ist->next_dts = ist->dts;
2599 if (ist->next_pts == AV_NOPTS_VALUE)
2600 ist->next_pts = ist->pts;
2604 av_init_packet(&avpkt);
// Sync our AV_TIME_BASE_Q clocks to the packet's dts when it has one.
2611 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2612 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2613 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2614 ist->next_pts = ist->pts = ist->dts;
2617 // while we have more to decode or while the decoder did output something on EOF
2618 while (ist->decoding_needed) {
2621 int decode_failed = 0;
2623 ist->pts = ist->next_pts;
2624 ist->dts = ist->next_dts;
2626 switch (ist->dec_ctx->codec_type) {
2627 case AVMEDIA_TYPE_AUDIO:
// 'repeating' passes: feed NULL so the decoder drains remaining frames.
2628 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2631 case AVMEDIA_TYPE_VIDEO:
2632 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
2634 if (!repeating || !pkt || got_output) {
2635 if (pkt && pkt->duration) {
2636 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2637 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
// No container duration: derive it from the codec framerate and repeat_pict.
2638 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2639 duration = ((int64_t)AV_TIME_BASE *
2640 ist->dec_ctx->framerate.den * ticks) /
2641 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2644 if(ist->dts != AV_NOPTS_VALUE && duration) {
2645 ist->next_dts += duration;
2647 ist->next_dts = AV_NOPTS_VALUE;
2651 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2653 case AVMEDIA_TYPE_SUBTITLE:
2656 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2657 if (!pkt && ret >= 0)
2664 if (ret == AVERROR_EOF) {
2670 if (decode_failed) {
2671 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2672 ist->file_index, ist->st->index, av_err2str(ret));
2674 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2675 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2677 if (!decode_failed || exit_on_error)
2683 ist->got_output = 1;
2688 // During draining, we might get multiple output frames in this loop.
2689 // ffmpeg.c does not drain the filter chain on configuration changes,
2690 // which means if we send multiple frames at once to the filters, and
2691 // one of those frames changes configuration, the buffered frames will
2692 // be lost. This can upset certain FATE tests.
2693 // Decode only 1 frame per call on EOF to appease these FATE tests.
2694 // The ideal solution would be to rewrite decoding to use the new
2695 // decoding API in a better way.
2702 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2703 /* except when looping we need to flush but not to send an EOF */
2704 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2705 int ret = send_filter_eof(ist);
2707 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2712 /* handle stream copy */
// Streamcopy path: advance next_dts analytically from codec parameters.
2713 if (!ist->decoding_needed) {
2714 ist->dts = ist->next_dts;
2715 switch (ist->dec_ctx->codec_type) {
2716 case AVMEDIA_TYPE_AUDIO:
2717 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2718 ist->dec_ctx->sample_rate;
2720 case AVMEDIA_TYPE_VIDEO:
2721 if (ist->framerate.num) {
2722 // TODO: Remove work-around for c99-to-c89 issue 7
2723 AVRational time_base_q = AV_TIME_BASE_Q;
// Snap next_dts to the forced framerate grid, then step one frame forward.
2724 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2725 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2726 } else if (pkt->duration) {
2727 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2728 } else if(ist->dec_ctx->framerate.num != 0) {
2729 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2730 ist->next_dts += ((int64_t)AV_TIME_BASE *
2731 ist->dec_ctx->framerate.den * ticks) /
2732 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2736 ist->pts = ist->dts;
2737 ist->next_pts = ist->next_dts;
// Copy the packet to every matching streamcopy output.
2739 for (i = 0; pkt && i < nb_output_streams; i++) {
2740 OutputStream *ost = output_streams[i];
2742 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2745 do_streamcopy(ist, ost, pkt);
2748 return !eof_reached;
// Generate an SDP description covering all RTP output files and either print
// it to stdout or write it to the file given by -sdp_file. Waits until every
// output file has written its header.
2751 static void print_sdp(void)
2756 AVIOContext *sdp_pb;
2757 AVFormatContext **avc;
// Bail out (fall through) until all output headers have been written.
2759 for (i = 0; i < nb_output_files; i++) {
2760 if (!output_files[i]->header_written)
2764 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; j counts how many were found.
2767 for (i = 0, j = 0; i < nb_output_files; i++) {
2768 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2769 avc[j] = output_files[i]->ctx;
2777 av_sdp_create(avc, j, sdp, sizeof(sdp));
2779 if (!sdp_filename) {
2780 printf("SDP:\n%s\n", sdp);
2783 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2784 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2786 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2787 avio_closep(&sdp_pb);
2788 av_freep(&sdp_filename);
// Look up the HWAccel table entry matching the given hardware pixel format;
// the hwaccels[] table is terminated by an entry with a NULL name.
2796 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2799 for (i = 0; hwaccels[i].name; i++)
2800 if (hwaccels[i].pix_fmt == pix_fmt)
2801 return &hwaccels[i];
// AVCodecContext.get_format callback: walk the decoder's proposed pixel
// formats and pick the first hwaccel format whose hwaccel can be initialized
// (honoring the user's -hwaccel selection); software formats fall through.
// NOTE(review): interior lines (including the final return) are not visible
// in this listing.
2805 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2807 InputStream *ist = s->opaque;
2808 const enum AVPixelFormat *p;
// pix_fmts is terminated by -1 (AV_PIX_FMT_NONE).
2811 for (p = pix_fmts; *p != -1; p++) {
2812 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2813 const HWAccel *hwaccel;
2815 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2818 hwaccel = get_hwaccel(*p);
// Skip formats that don't match the active or user-requested hwaccel.
2820 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2821 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2824 ret = hwaccel->init(s);
// Init failure is fatal only if this hwaccel was explicitly requested.
2826 if (ist->hwaccel_id == hwaccel->id) {
2827 av_log(NULL, AV_LOG_FATAL,
2828 "%s hwaccel requested for input stream #%d:%d, "
2829 "but cannot be initialized.\n", hwaccel->name,
2830 ist->file_index, ist->st->index);
2831 return AV_PIX_FMT_NONE;
2836 if (ist->hw_frames_ctx) {
2837 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2838 if (!s->hw_frames_ctx)
2839 return AV_PIX_FMT_NONE;
2842 ist->active_hwaccel_id = hwaccel->id;
2843 ist->hwaccel_pix_fmt = *p;
// AVCodecContext.get_buffer2 callback: delegate frame allocation to the
// active hwaccel when the frame uses its pixel format, otherwise fall back
// to the default allocator.
2850 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2852 InputStream *ist = s->opaque;
2854 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2855 return ist->hwaccel_get_buffer(s, frame, flags);
2857 return avcodec_default_get_buffer2(s, frame, flags);
// Open the decoder for one input stream (when decoding is needed): install
// the hwaccel callbacks, apply decoder options, and open the codec. On
// failure a human-readable message is written into 'error'.
// NOTE(review): several interior lines are not visible in this listing.
2860 static int init_input_stream(int ist_index, char *error, int error_len)
2863 InputStream *ist = input_streams[ist_index];
2865 if (ist->decoding_needed) {
2866 AVCodec *codec = ist->dec;
2868 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2869 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2870 return AVERROR(EINVAL);
// Wire up the hwaccel negotiation/allocation callbacks.
2873 ist->dec_ctx->opaque = ist;
2874 ist->dec_ctx->get_format = get_format;
2875 ist->dec_ctx->get_buffer2 = get_buffer;
2876 ist->dec_ctx->thread_safe_callbacks = 1;
2878 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2879 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2880 (ist->decoding_needed & DECODING_FOR_OST)) {
2881 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2882 if (ist->decoding_needed & DECODING_FOR_FILTER)
2883 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2886 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2888 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2889 * audio, and video decoders such as cuvid or mediacodec */
2890 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
// Default to automatic threading unless the user chose a thread count.
2892 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2893 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2894 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2895 if (ret == AVERROR_EXPERIMENTAL)
2896 abort_codec_experimental(codec, 0);
2898 snprintf(error, error_len,
2899 "Error while opening decoder for input stream "
2901 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder option left in the dict was not consumed -> report it.
2904 assert_avoptions(ist->decoder_opts);
2907 ist->next_pts = AV_NOPTS_VALUE;
2908 ist->next_dts = AV_NOPTS_VALUE;
// Return the input stream feeding this output stream, or (presumably) NULL
// when the output has no direct source (source_index < 0) — the fallthrough
// line is not visible in this listing.
2913 static InputStream *get_input_stream(OutputStream *ost)
2915 if (ost->source_index >= 0)
2916 return input_streams[ost->source_index];
// qsort comparator for int64_t values: returns -1/0/1 via FFDIFFSIGN
// (avoids overflow that plain subtraction of int64_t could cause).
2920 static int compare_int64(const void *a, const void *b)
2922 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2925 /* open the muxer when all the streams are initialized */
// Once every stream of this output file is initialized: write the container
// header, emit the SDP if requested, then flush packets that were queued in
// the per-stream muxing FIFOs while waiting.
2926 static int check_init_output_file(OutputFile *of, int file_index)
// Not all streams ready yet -> nothing to do (early out, lines not visible).
2930 for (i = 0; i < of->ctx->nb_streams; i++) {
2931 OutputStream *ost = output_streams[of->ost_index + i];
2932 if (!ost->initialized)
2936 of->ctx->interrupt_callback = int_cb;
2938 ret = avformat_write_header(of->ctx, &of->opts);
2940 av_log(NULL, AV_LOG_ERROR,
2941 "Could not write header for output file #%d "
2942 "(incorrect codec parameters ?): %s\n",
2943 file_index, av_err2str(ret));
2946 //assert_avoptions(of->opts);
2947 of->header_written = 1;
2949 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2951 if (sdp_filename || want_sdp)
2954 /* flush the muxing queues */
2955 for (i = 0; i < of->ctx->nb_streams; i++) {
2956 OutputStream *ost = output_streams[of->ost_index + i];
2958 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2959 if (!av_fifo_size(ost->muxing_queue))
2960 ost->mux_timebase = ost->st->time_base;
2962 while (av_fifo_size(ost->muxing_queue)) {
2964 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2965 write_packet(of, &pkt, ost);
// Initialize the chain of bitstream filters for an output stream: propagate
// codec parameters and time base through each filter, then copy the final
// filter's output parameters/time base back onto the stream.
2972 static int init_output_bsfs(OutputStream *ost)
2977 if (!ost->nb_bitstream_filters)
2980 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2981 ctx = ost->bsf_ctx[i];
// Filter i's input comes from filter i-1's output (or the stream itself).
2983 ret = avcodec_parameters_copy(ctx->par_in,
2984 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2988 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2990 ret = av_bsf_init(ctx);
2992 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2993 ost->bsf_ctx[i]->filter->name);
// The last filter in the chain defines the stream's final parameters.
2998 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2999 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3003 ost->st->time_base = ctx->time_base_out;
// Set up an output stream in streamcopy mode: copy codec parameters from
// the input, resolve the codec tag for the target container, transfer
// timing info, duplicate side data, and apply per-codec-type fixups.
// NOTE(review): several interior lines are not visible in this listing.
3008 static int init_output_stream_streamcopy(OutputStream *ost)
3010 OutputFile *of = output_files[ost->file_index];
3011 InputStream *ist = get_input_stream(ost);
3012 AVCodecParameters *par_dst = ost->st->codecpar;
3013 AVCodecParameters *par_src = ost->ref_par;
3016 uint32_t codec_tag = par_dst->codec_tag;
// Streamcopy must have a source stream and no filtergraph.
3018 av_assert0(ist && !ost->filter);
3020 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3022 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3024 av_log(NULL, AV_LOG_FATAL,
3025 "Error setting up codec context options.\n");
3028 avcodec_parameters_from_context(par_src, ost->enc_ctx);
// Keep the source codec tag only if the target container accepts it.
3031 unsigned int codec_tag_tmp;
3032 if (!of->ctx->oformat->codec_tag ||
3033 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3034 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3035 codec_tag = par_src->codec_tag;
3038 ret = avcodec_parameters_copy(par_dst, par_src);
3042 par_dst->codec_tag = codec_tag;
3044 if (!ost->frame_rate.num)
3045 ost->frame_rate = ist->framerate;
3046 ost->st->avg_frame_rate = ost->frame_rate;
3048 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3052 // copy timebase while removing common factors
3053 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3054 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3056 // copy estimated duration as a hint to the muxer
3057 if (ost->st->duration <= 0 && ist->st->duration > 0)
3058 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3061 ost->st->disposition = ist->st->disposition;
// Deep-copy all stream-level side data from input to output.
3063 if (ist->st->nb_side_data) {
3064 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
3065 sizeof(*ist->st->side_data));
3066 if (!ost->st->side_data)
3067 return AVERROR(ENOMEM);
3069 ost->st->nb_side_data = 0;
3070 for (i = 0; i < ist->st->nb_side_data; i++) {
3071 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3072 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
3074 sd_dst->data = av_malloc(sd_src->size);
3076 return AVERROR(ENOMEM);
3077 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3078 sd_dst->size = sd_src->size;
3079 sd_dst->type = sd_src->type;
3080 ost->st->nb_side_data++;
// User-forced rotation: emit a display matrix (negated per convention).
3084 if (ost->rotate_overridden) {
3085 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3086 sizeof(int32_t) * 9);
3088 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3091 ost->parser = av_parser_init(par_dst->codec_id);
3092 ost->parser_avctx = avcodec_alloc_context3(NULL);
3093 if (!ost->parser_avctx)
3094 return AVERROR(ENOMEM);
3096 switch (par_dst->codec_type) {
3097 case AVMEDIA_TYPE_AUDIO:
3098 if (audio_volume != 256) {
3099 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// MP3/AC3: reset bogus block_align values that would confuse muxers.
3102 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3103 par_dst->block_align= 0;
3104 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3105 par_dst->block_align= 0;
3107 case AVMEDIA_TYPE_VIDEO:
3108 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3110 av_mul_q(ost->frame_aspect_ratio,
3111 (AVRational){ par_dst->height, par_dst->width });
3112 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3113 "with stream copy may produce invalid files\n");
3115 else if (ist->st->sample_aspect_ratio.num)
3116 sar = ist->st->sample_aspect_ratio;
3118 sar = par_src->sample_aspect_ratio;
3119 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3120 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3121 ost->st->r_frame_rate = ist->st->r_frame_rate;
3125 ost->mux_timebase = ist->st->time_base;
// Set the "encoder" metadata tag on the output stream to identify libavcodec
// and the encoder name, unless the user already set one. In bitexact mode
// only the bare "Lavc <name>" string is used (no version, for reproducible
// output).
3130 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3132 AVDictionaryEntry *e;
3134 uint8_t *encoder_string;
3135 int encoder_string_len;
3136 int format_flags = 0;
3137 int codec_flags = 0;
// Respect a user-provided encoder tag.
3139 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate -fflags / -flags option strings to detect +bitexact.
3142 e = av_dict_get(of->opts, "fflags", NULL, 0);
3144 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3147 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3149 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3151 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3154 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3157 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3158 encoder_string = av_mallocz(encoder_string_len);
3159 if (!encoder_string)
3162 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3163 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3165 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3166 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL: the dict takes ownership of encoder_string.
3167 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3168 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
// Parse the -force_key_frames specification 'kf' (a comma-separated list of
// timestamps, where an entry may be "chapters[+offset]" to key-frame at
// chapter starts) into a sorted array of pts values in the encoder time base,
// stored in ost->forced_kf_pts / ost->forced_kf_count.
3171 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3172 AVCodecContext *avctx)
3175 int n = 1, i, size, index = 0;
// Count entries: one more than the number of commas (counting loop body
// not visible in this listing).
3178 for (p = kf; *p; p++)
3182 pts = av_malloc_array(size, sizeof(*pts));
3184 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3189 for (i = 0; i < n; i++) {
3190 char *next = strchr(p, ',');
// "chapters[+offset]": expand into one entry per chapter start.
3195 if (!memcmp(p, "chapters", 8)) {
3197 AVFormatContext *avf = output_files[ost->file_index]->ctx;
// Grow the array by (nb_chapters - 1), guarding against int overflow.
3200 if (avf->nb_chapters > INT_MAX - size ||
3201 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3203 av_log(NULL, AV_LOG_FATAL,
3204 "Could not allocate forced key frames array.\n");
3207 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3208 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3210 for (j = 0; j < avf->nb_chapters; j++) {
3211 AVChapter *c = avf->chapters[j];
3212 av_assert1(index < size);
3213 pts[index++] = av_rescale_q(c->start, c->time_base,
3214 avctx->time_base) + t;
// Plain timestamp entry.
3219 t = parse_time_or_die("force_key_frames", p, 1);
3220 av_assert1(index < size);
3221 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3228 av_assert0(index == size);
// Sorted order lets the encoder loop consume forced keyframes in sequence.
3229 qsort(pts, size, sizeof(*pts), compare_int64);
3230 ost->forced_kf_count = size;
3231 ost->forced_kf_pts = pts;
// Configure the encoder context for an output stream that will be encoded:
// derive frame rate, time base, format/geometry (video), sample format/rate/
// layout (audio), dispositions, and forced-keyframe settings, mostly from
// the buffersink at the end of the stream's filtergraph.
// NOTE(review): several interior lines are not visible in this listing.
3234 static int init_output_stream_encode(OutputStream *ost)
3236 InputStream *ist = get_input_stream(ost);
3237 AVCodecContext *enc_ctx = ost->enc_ctx;
3238 AVCodecContext *dec_ctx = NULL;
3239 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3242 set_encoder_id(output_files[ost->file_index], ost);
3244 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3245 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3246 // which have to be filtered out to prevent leaking them to output files.
3247 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3250 ost->st->disposition = ist->st->disposition;
3252 dec_ctx = ist->dec_ctx;
3254 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its type in the file, mark it as default.
3256 for (j = 0; j < oc->nb_streams; j++) {
3257 AVStream *st = oc->streams[j];
3258 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3261 if (j == oc->nb_streams)
3262 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3263 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3264 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame-rate resolution order: filtergraph -> -r option -> input r_frame_rate
// -> hard-coded 25 fps fallback.
3267 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3268 if (!ost->frame_rate.num)
3269 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3270 if (ist && !ost->frame_rate.num)
3271 ost->frame_rate = ist->framerate;
3272 if (ist && !ost->frame_rate.num)
3273 ost->frame_rate = ist->st->r_frame_rate;
3274 if (ist && !ost->frame_rate.num) {
3275 ost->frame_rate = (AVRational){25, 1};
3276 av_log(NULL, AV_LOG_WARNING,
3278 "about the input framerate is available. Falling "
3279 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3280 "if you want a different framerate.\n",
3281 ost->file_index, ost->index);
3283 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
// Snap to the nearest rate the encoder supports (unless -force_fps).
3284 if (ost->enc->supported_framerates && !ost->force_fps) {
3285 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3286 ost->frame_rate = ost->enc->supported_framerates[idx];
3288 // reduce frame rate for mpeg4 to be within the spec limits
3289 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3290 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3291 ost->frame_rate.num, ost->frame_rate.den, 65535);
3295 switch (enc_ctx->codec_type) {
3296 case AVMEDIA_TYPE_AUDIO:
// Pull the negotiated audio format from the filtergraph's buffersink.
3297 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3299 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3300 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3301 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3302 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3303 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3304 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3306 case AVMEDIA_TYPE_VIDEO:
3307 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3308 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3309 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3310 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3311 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3312 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3313 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Rescale any pre-parsed forced keyframe pts into the encoder time base.
3315 for (j = 0; j < ost->forced_kf_count; j++)
3316 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3318 enc_ctx->time_base);
3320 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3321 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3322 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3323 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3324 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3325 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
// Warn on implicitly chosen non-yuv420p pixel formats for common encoders.
3326 if (!strncmp(ost->enc->name, "libx264", 7) &&
3327 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3328 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3329 av_log(NULL, AV_LOG_WARNING,
3330 "No pixel format specified, %s for H.264 encoding chosen.\n"
3331 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3332 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3333 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3334 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3335 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3336 av_log(NULL, AV_LOG_WARNING,
3337 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3338 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3339 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3340 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3342 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3343 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3345 enc_ctx->framerate = ost->frame_rate;
3347 ost->st->avg_frame_rate = ost->frame_rate;
// Geometry or format changed vs. the decoder: bits_per_raw_sample no longer
// carries over; use the user-specified value instead.
3350 enc_ctx->width != dec_ctx->width ||
3351 enc_ctx->height != dec_ctx->height ||
3352 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3353 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
// -force_key_frames: "expr:..." is evaluated per frame, "source" keeps the
// input keyframes, anything else is a static timestamp list.
3356 if (ost->forced_keyframes) {
3357 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3358 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3359 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3361 av_log(NULL, AV_LOG_ERROR,
3362 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3365 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3366 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3367 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3368 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3370 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3371 // parse it only for static kf timings
3372 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3373 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3377 case AVMEDIA_TYPE_SUBTITLE:
3378 enc_ctx->time_base = AV_TIME_BASE_Q;
3379 if (!enc_ctx->width) {
3380 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3381 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3384 case AVMEDIA_TYPE_DATA:
3391 ost->mux_timebase = enc_ctx->time_base;
3396 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3400 if (ost->encoding_needed) {
3401 AVCodec *codec = ost->enc;
3402 AVCodecContext *dec = NULL;
3405 ret = init_output_stream_encode(ost);
3409 if ((ist = get_input_stream(ost)))
3411 if (dec && dec->subtitle_header) {
3412 /* ASS code assumes this buffer is null terminated so add extra byte. */
3413 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3414 if (!ost->enc_ctx->subtitle_header)
3415 return AVERROR(ENOMEM);
3416 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3417 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3419 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3420 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3421 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3423 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3424 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3425 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3427 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3428 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3429 av_buffersink_get_format(ost->filter->filter)) {
3430 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3431 if (!ost->enc_ctx->hw_frames_ctx)
3432 return AVERROR(ENOMEM);
3435 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3436 if (ret == AVERROR_EXPERIMENTAL)
3437 abort_codec_experimental(codec, 1);
3438 snprintf(error, error_len,
3439 "Error while opening encoder for output stream #%d:%d - "
3440 "maybe incorrect parameters such as bit_rate, rate, width or height",
3441 ost->file_index, ost->index);
3444 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3445 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3446 av_buffersink_set_frame_size(ost->filter->filter,
3447 ost->enc_ctx->frame_size);
3448 assert_avoptions(ost->encoder_opts);
3449 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3450 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3451 " It takes bits/s as argument, not kbits/s\n");
3453 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3455 av_log(NULL, AV_LOG_FATAL,
3456 "Error initializing the output stream codec context.\n");
3460 * FIXME: ost->st->codec should't be needed here anymore.
3462 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3466 if (ost->enc_ctx->nb_coded_side_data) {
3469 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3470 sizeof(*ost->st->side_data));
3471 if (!ost->st->side_data)
3472 return AVERROR(ENOMEM);
3474 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3475 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3476 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3478 sd_dst->data = av_malloc(sd_src->size);
3480 return AVERROR(ENOMEM);
3481 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3482 sd_dst->size = sd_src->size;
3483 sd_dst->type = sd_src->type;
3484 ost->st->nb_side_data++;
3489 * Add global input side data. For now this is naive, and copies it
3490 * from the input stream's global side data. All side data should
3491 * really be funneled over AVFrame and libavfilter, then added back to
3492 * packet side data, and then potentially using the first packet for
3497 for (i = 0; i < ist->st->nb_side_data; i++) {
3498 AVPacketSideData *sd = &ist->st->side_data[i];
3499 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3501 return AVERROR(ENOMEM);
3502 memcpy(dst, sd->data, sd->size);
3503 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3504 av_display_rotation_set((uint32_t *)dst, 0);
3508 // copy timebase while removing common factors
3509 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3510 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3512 // copy estimated duration as a hint to the muxer
3513 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3514 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3516 ost->st->codec->codec= ost->enc_ctx->codec;
3517 } else if (ost->stream_copy) {
3518 ret = init_output_stream_streamcopy(ost);
3523 * FIXME: will the codec context used by the parser during streamcopy
3524 * This should go away with the new parser API.
3526 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3531 // parse user provided disposition, and update stream values
3532 if (ost->disposition) {
3533 static const AVOption opts[] = {
3534 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3535 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3536 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3537 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3538 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3539 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3540 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3541 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3542 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3543 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3544 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3545 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3546 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3547 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3550 static const AVClass class = {
3552 .item_name = av_default_item_name,
3554 .version = LIBAVUTIL_VERSION_INT,
3556 const AVClass *pclass = &class;
3558 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3563 /* initialize bitstream filters for the output stream
3564 * needs to be done here, because the codec id for streamcopy is not
3565 * known until now */
3566 ret = init_output_bsfs(ost);
3570 ost->initialized = 1;
3572 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn, at most once per stream index, that a packet references a stream
 * which appeared after the input was opened; such streams are ignored by
 * the converter (see the check in process_input()).
 * NOTE(review): braces and an early-return line of this function are elided
 * in this extract. */
3579 static void report_new_stream(int input_index, AVPacket *pkt)
3581 InputFile *file = input_files[input_index];
3582 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index — the elided line
 * after this guard presumably returns early. */
3584 if (pkt->stream_index < file->nb_streams_warn)
3586 av_log(file->ctx, AV_LOG_WARNING,
3587 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3588 av_get_media_type_string(st->codecpar->codec_type),
3589 input_index, pkt->stream_index,
3590 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index warned about so far. */
3591 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the transcode main loop:
 *  - bind each complex-filtergraph output to a source input stream,
 *  - arm frame-rate emulation timers (-re),
 *  - initialize all input streams and open encoders for outputs that are
 *    not fed by a filtergraph (those are initialized lazily later),
 *  - discard programs whose streams are all unused,
 *  - write headers for outputs that have no streams (AVFMT_NOSTREAMS),
 *  - dump the stream mapping at AV_LOG_INFO.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): many lines (error-handling jumps, loop bodies such as
 * 'continue'/'break', and the final return) are elided in this extract. */
3594 static int transcode_init(void)
3596 int ret = 0, i, j, k;
3597 AVFormatContext *oc;
3600 char error[1024] = {0};
/* Attach each complex-filtergraph output to its single source stream (only
 * when the graph has exactly one input), so mapping can be reported below. */
3602 for (i = 0; i < nb_filtergraphs; i++) {
3603 FilterGraph *fg = filtergraphs[i];
3604 for (j = 0; j < fg->nb_outputs; j++) {
3605 OutputFilter *ofilter = fg->outputs[j];
3606 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3608 if (fg->nb_inputs != 1)
3610 for (k = nb_input_streams-1; k >= 0 ; k--)
3611 if (fg->inputs[0]->ist == input_streams[k])
3613 ofilter->ost->source_index = k;
3617 /* init framerate emulation */
3618 for (i = 0; i < nb_input_files; i++) {
3619 InputFile *ifile = input_files[i];
3620 if (ifile->rate_emu)
3621 for (j = 0; j < ifile->nb_streams; j++)
3622 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3625 /* init input streams */
3626 for (i = 0; i < nb_input_streams; i++)
/* On failure, close any encoder contexts already set up before bailing out. */
3627 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3628 for (i = 0; i < nb_output_streams; i++) {
3629 ost = output_streams[i];
3630 avcodec_close(ost->enc_ctx);
3635 /* open each encoder */
3636 for (i = 0; i < nb_output_streams; i++) {
3637 // skip streams fed from filtergraphs until we have a frame for them
3638 if (output_streams[i]->filter)
3641 ret = init_output_stream(output_streams[i], error, sizeof(error));
3646 /* discard unused programs */
3647 for (i = 0; i < nb_input_files; i++) {
3648 InputFile *ifile = input_files[i];
3649 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3650 AVProgram *p = ifile->ctx->programs[j];
3651 int discard = AVDISCARD_ALL;
/* Keep the program if at least one of its streams is in use. */
3653 for (k = 0; k < p->nb_stream_indexes; k++)
3654 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3655 discard = AVDISCARD_DEFAULT;
3658 p->discard = discard;
3662 /* write headers for files with no streams */
3663 for (i = 0; i < nb_output_files; i++) {
3664 oc = output_files[i]->ctx;
3665 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3666 ret = check_init_output_file(output_files[i], i);
3673 /* dump the stream mapping */
3674 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3675 for (i = 0; i < nb_input_streams; i++) {
3676 ist = input_streams[i];
3678 for (j = 0; j < ist->nb_filters; j++) {
3679 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3680 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3681 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3682 ist->filters[j]->name);
3683 if (nb_filtergraphs > 1)
3684 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3685 av_log(NULL, AV_LOG_INFO, "\n");
3690 for (i = 0; i < nb_output_streams; i++) {
3691 ost = output_streams[i];
3693 if (ost->attachment_filename) {
3694 /* an attached file */
3695 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3696 ost->attachment_filename, ost->file_index, ost->index);
3700 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3701 /* output from a complex graph */
3702 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3703 if (nb_filtergraphs > 1)
3704 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3706 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3707 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping line, with optional sync-stream suffix. */
3711 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3712 input_streams[ost->source_index]->file_index,
3713 input_streams[ost->source_index]->st->index,
3716 if (ost->sync_ist != input_streams[ost->source_index])
3717 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3718 ost->sync_ist->file_index,
3719 ost->sync_ist->st->index);
3720 if (ost->stream_copy)
3721 av_log(NULL, AV_LOG_INFO, " (copy)");
/* When transcoding, report "codec (decoder) -> codec (encoder)"; a
 * decoder/encoder whose name equals the codec name is shown as "native". */
3723 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3724 const AVCodec *out_codec = ost->enc;
3725 const char *decoder_name = "?";
3726 const char *in_codec_name = "?";
3727 const char *encoder_name = "?";
3728 const char *out_codec_name = "?";
3729 const AVCodecDescriptor *desc;
3732 decoder_name = in_codec->name;
3733 desc = avcodec_descriptor_get(in_codec->id);
3735 in_codec_name = desc->name;
3736 if (!strcmp(decoder_name, in_codec_name))
3737 decoder_name = "native";
3741 encoder_name = out_codec->name;
3742 desc = avcodec_descriptor_get(out_codec->id);
3744 out_codec_name = desc->name;
3745 if (!strcmp(encoder_name, out_codec_name))
3746 encoder_name = "native";
3749 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3750 in_codec_name, decoder_name,
3751 out_codec_name, encoder_name);
3753 av_log(NULL, AV_LOG_INFO, "\n");
/* Shared error exit (the jump into this label is on an elided line). */
3757 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3761 transcode_init_done = 1;
3766 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3767 static int need_output(void)
3771 for (i = 0; i < nb_output_streams; i++) {
3772 OutputStream *ost = output_streams[i];
3773 OutputFile *of = output_files[ost->file_index];
3774 AVFormatContext *os = output_files[ost->file_index]->ctx;
/* Stream is done when explicitly finished or its file hit -fs limit;
 * the elided line after this condition presumably skips it. */
3776 if (ost->finished ||
3777 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit reached: close every stream of that output file. */
3779 if (ost->frame_number >= ost->max_frames) {
3781 for (j = 0; j < of->ctx->nb_streams; j++)
3782 close_output_stream(output_streams[of->ost_index + j]);
3793 * Select the output stream to process.
3795 * @return selected output stream, or NULL if none available
3797 static OutputStream *choose_output(void)
3800 int64_t opts_min = INT64_MAX;
3801 OutputStream *ost_min = NULL;
3803 for (i = 0; i < nb_output_streams; i++) {
3804 OutputStream *ost = output_streams[i];
/* Compare streams by current output dts rescaled to a common time base;
 * an unset dts sorts first (INT64_MIN) so fresh streams get fed early. */
3805 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3806 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3808 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3809 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Not-yet-initialized streams that still expect input are skipped
 * (skip statement on an elided line). */
3811 if (!ost->initialized && !ost->inputs_done)
/* Pick the unfinished stream with the smallest dts; if it is currently
 * unavailable, return NULL so the caller retries later. */
3814 if (!ost->finished && opts < opts_min) {
3816 ost_min = ost->unavailable ? NULL : ost;
/* Enable or disable terminal echo on stdin (fd 0) via termios; used around
 * interactive command entry. Failures of tcgetattr are silently ignored.
 * NOTE(review): the local 'struct termios tty;' declaration and the
 * surrounding #if guard are on elided lines. */
3822 static void set_tty_echo(int on)
3826 if (tcgetattr(0, &tty) == 0) {
3827 if (on) tty.c_lflag |= ECHO;
3828 else tty.c_lflag &= ~ECHO;
3829 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most every 100ms, and not when running as a daemon) for a
 * single-key interactive command: verbosity +/-, QP histogram toggle,
 * packet-dump cycling, sending/queuing filter commands (c/C), debug-mode
 * cycling (d/D), and help (?). Returns AVERROR_EXIT when the user asked to
 * quit or a termination signal was received; otherwise falls through
 * (return value lines are elided in this extract). */
3834 static int check_keyboard_interaction(int64_t cur_time)
3837 static int64_t last_time;
3838 if (received_nb_signals)
3839 return AVERROR_EXIT;
3840 /* read_key() returns 0 on EOF */
3841 if(cur_time - last_time >= 100000 && !run_as_daemon){
3843 last_time = cur_time;
/* An elided check here presumably maps the quit key to this exit. */
3847 return AVERROR_EXIT;
3848 if (key == '+') av_log_set_level(av_log_get_level()+10);
3849 if (key == '-') av_log_set_level(av_log_get_level()-10);
3850 if (key == 's') qp_hist ^= 1;
/* Packet/hex dump cycling: off -> pkt dump -> pkt+hex dump -> off. */
3853 do_hex_dump = do_pkt_dump = 0;
3854 } else if(do_pkt_dump){
3858 av_log_set_level(AV_LOG_DEBUG);
/* 'c' sends a command to the first matching filter, 'C' to all of them;
 * the command line is read interactively from the terminal. */
3860 if (key == 'c' || key == 'C'){
3861 char buf[4096], target[64], command[256], arg[256] = {0};
3864 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3867 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3872 fprintf(stderr, "\n");
3874 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3875 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3876 target, time, command, arg);
3877 for (i = 0; i < nb_filtergraphs; i++) {
3878 FilterGraph *fg = filtergraphs[i];
3881 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3882 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3883 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3884 } else if (key == 'c') {
3885 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3886 ret = AVERROR_PATCHWELCOME;
/* time >= 0: queue the command for later instead of sending it now. */
3888 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3890 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3895 av_log(NULL, AV_LOG_ERROR,
3896 "Parse error, at least 3 arguments were expected, "
3897 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through debug modes; 'D' prompts for an explicit value.
 * Note this pokes the deprecated st->codec context directly. */
3900 if (key == 'd' || key == 'D'){
3903 debug = input_streams[0]->st->codec->debug<<1;
3904 if(!debug) debug = 1;
3905 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3912 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3917 fprintf(stderr, "\n");
3918 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3919 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to every decoder and encoder context. */
3921 for(i=0;i<nb_input_streams;i++) {
3922 input_streams[i]->st->codec->debug = debug;
3924 for(i=0;i<nb_output_streams;i++) {
3925 OutputStream *ost = output_streams[i];
3926 ost->enc_ctx->debug = debug;
3928 if(debug) av_log_set_level(AV_LOG_DEBUG);
3929 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and unknown keys, per the elided condition) print this help text. */
3932 fprintf(stderr, "key function\n"
3933 "? show this help\n"
3934 "+ increase verbosity\n"
3935 "- decrease verbosity\n"
3936 "c Send command to first matching filter supporting it\n"
3937 "C Send/Queue command to all matching filters\n"
3938 "D cycle through available debug modes\n"
3939 "h dump packets/hex press to cycle through the 3 states\n"
3941 "s Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. On demuxer
 * error/EOF the error is propagated via the queue's receive side. 'arg' is
 * the InputFile* (the local 'InputFile *f = arg;' line is elided). */
3948 static void *input_thread(void *arg)
3951 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3956 ret = av_read_frame(f->ctx, &pkt);
3958 if (ret == AVERROR(EAGAIN)) {
/* Demux error or EOF: hand the error code to the consumer side. */
3963 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3966 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking queue full: retry blocking once and warn the user that
 * -thread_queue_size may be too small. */
3967 if (flags && ret == AVERROR(EAGAIN)) {
3969 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3970 av_log(f->ctx, AV_LOG_WARNING,
3971 "Thread message queue blocking; consider raising the "
3972 "thread_queue_size option (current value: %d)\n",
3973 f->thread_queue_size);
/* Send failed: drop the packet and propagate the error, then exit loop. */
3976 if (ret != AVERROR_EOF)
3977 av_log(f->ctx, AV_LOG_ERROR,
3978 "Unable to send packet to main thread: %s\n",
3980 av_packet_unref(&pkt);
3981 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap all input demuxer threads: signal EOF on the send side,
 * drain and unref any queued packets, join each thread and free its queue. */
3989 static void free_input_threads(void)
3993 for (i = 0; i < nb_input_files; i++) {
3994 InputFile *f = input_files[i];
/* Skip files that never had a thread queue (e.g. single-input runs). */
3997 if (!f || !f->in_thread_queue)
3999 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4000 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4001 av_packet_unref(&pkt);
4003 pthread_join(f->thread, NULL);
4005 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than one
 * input; a single input is read directly). Returns 0 or a negative AVERROR.
 * NOTE(review): the early-return for the single-input case sits on an
 * elided line after the count check. */
4009 static int init_input_threads(void)
4013 if (nb_input_files == 1)
4016 for (i = 0; i < nb_input_files; i++) {
4017 InputFile *f = input_files[i];
/* Use non-blocking queue sends for seekable/non-lavfi inputs so a full
 * queue does not stall the demuxer thread silently. */
4019 if (f->ctx->pb ? !f->ctx->pb->seekable :
4020 strcmp(f->ctx->iformat->name, "lavfi"))
4021 f->non_blocking = 1;
4022 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4023 f->thread_queue_size, sizeof(AVPacket));
4027 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4028 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4029 av_thread_message_queue_free(&f->in_thread_queue);
4030 return AVERROR(ret);
/* Receive the next packet for 'f' from its demuxer thread's message queue;
 * non-blocking when the file is flagged so (the condition expression is on
 * an elided line between these two). */
4036 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4038 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4040 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet from input file 'f': honours -re rate emulation by
 * returning EAGAIN while any stream of the file is ahead of wall-clock time,
 * then reads either from the per-file thread queue (multi-input) or directly
 * via av_read_frame (single input). */
4044 static int get_input_packet(InputFile *f, AVPacket *pkt)
4048 for (i = 0; i < f->nb_streams; i++) {
4049 InputStream *ist = input_streams[f->ist_index + i];
/* Stream dts converted to microseconds vs. elapsed real time since start;
 * the comparison guarding this EAGAIN is on an elided line. */
4050 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4051 int64_t now = av_gettime_relative() - ist->start;
4053 return AVERROR(EAGAIN);
4058 if (nb_input_files > 1)
4059 return get_input_packet_mt(f, pkt);
4061 return av_read_frame(f->ctx, pkt);
/* Return non-zero if any output stream is currently marked unavailable,
 * i.e. the last processing step ended in EAGAIN somewhere. */
4064 static int got_eagain(void)
4067 for (i = 0; i < nb_output_streams; i++)
4068 if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN state: per-input-file 'eagain' flags and
 * per-output-stream 'unavailable' flags, so the next loop iteration retries
 * every input and output. */
4073 static void reset_eagain(void)
4076 for (i = 0; i < nb_input_files; i++)
4077 input_files[i]->eagain = 0;
4078 for (i = 0; i < nb_output_streams; i++)
4079 output_streams[i]->unavailable = 0;
4082 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares 'tmp' (in tmp_time_base) against '*duration' (in time_base) with
 * av_compare_ts; when tmp is larger, *duration is replaced and tmp_time_base
 * becomes the new time base of the duration. The assignment branches between
 * these returns are on elided lines. */
4083 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4084 AVRational time_base)
4090 return tmp_time_base;
4093 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4096 return tmp_time_base;
/* Rewind input file 'ifile' to its start for -stream_loop: seek to
 * start_time, flush every active decoder, then (re)compute the file's total
 * duration — including the length of the last frame — so timestamps of the
 * next loop iteration can be offset correctly. Returns 0 or the av_seek_frame
 * error. NOTE(review): the 'has_audio' propagation and the loop-count
 * decrement near the end are partly on elided lines. */
4102 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4105 AVCodecContext *avctx;
4106 int i, ret, has_audio = 0;
4107 int64_t duration = 0;
4109 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush decoders and detect whether any audio stream produced
 * samples (audio defines the last-frame duration more precisely). */
4113 for (i = 0; i < ifile->nb_streams; i++) {
4114 ist = input_streams[ifile->ist_index + i];
4115 avctx = ist->dec_ctx;
4118 if (ist->decoding_needed) {
4119 process_input_packet(ist, NULL, 1);
4120 avcodec_flush_buffers(avctx);
4123 /* duration is the length of the last frame in a stream
4124 * when audio stream is present we don't care about
4125 * last video frame length because it's not defined exactly */
4126 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: derive the last-frame duration per stream. */
4130 for (i = 0; i < ifile->nb_streams; i++) {
4131 ist = input_streams[ifile->ist_index + i];
4132 avctx = ist->dec_ctx;
4135 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4136 AVRational sample_rate = {1, avctx->sample_rate};
4138 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (or other) fallback: one frame at the configured or average
 * frame rate, or 1 time-base unit when neither is known. */
4142 if (ist->framerate.num) {
4143 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4144 } else if (ist->st->avg_frame_rate.num) {
4145 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4146 } else duration = 1;
4148 if (!ifile->duration)
4149 ifile->time_base = ist->st->time_base;
4150 /* the total duration of the stream, max_pts - min_pts is
4151 * the duration of the stream without the last frame */
4152 duration += ist->max_pts - ist->min_pts;
4153 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count: decrement handled around this check (elided lines). */
4157 if (ifile->loop > 0)
/* Read and pre-process one packet from input file 'file_index', then hand it
 * to process_input_packet(). Handles: -stream_loop rewinding, EOF flushing,
 * newly-appearing streams, timestamp wrap correction, ts_offset/ts_scale
 * application, inter- and intra-stream discontinuity compensation and
 * invalid-timestamp dropping. Return codes documented below.
 * NOTE(review): numerous lines (declarations of pkt/ist/pkt_dts/duration,
 * several condition tails and 'continue'/'return' statements) are elided in
 * this extract. */
4165 * - 0 -- one packet was read and processed
4166 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4167 * this function should be called again
4168 * - AVERROR_EOF -- this function should not be called again
4170 static int process_input(int file_index)
4172 InputFile *ifile = input_files[file_index];
4173 AVFormatContext *is;
4181 ret = get_input_packet(ifile, &pkt);
4183 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on demux failure/EOF, seek back to start and retry once. */
4187 if (ret < 0 && ifile->loop) {
4188 if ((ret = seek_to_start(ifile, is)) < 0)
4190 ret = get_input_packet(ifile, &pkt);
4191 if (ret == AVERROR(EAGAIN)) {
4197 if (ret != AVERROR_EOF) {
4198 print_error(is->filename, ret);
/* Real EOF: flush all decoders of this file and finish outputs that do not
 * pass through libavfilter. */
4203 for (i = 0; i < ifile->nb_streams; i++) {
4204 ist = input_streams[ifile->ist_index + i];
4205 if (ist->decoding_needed) {
4206 ret = process_input_packet(ist, NULL, 0);
4211 /* mark all outputs that don't go through lavfi as finished */
4212 for (j = 0; j < nb_output_streams; j++) {
4213 OutputStream *ost = output_streams[j];
4215 if (ost->source_index == ifile->ist_index + i &&
4216 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4217 finish_output_stream(ost);
4221 ifile->eof_reached = 1;
4222 return AVERROR(EAGAIN);
4228 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4229 is->streams[pkt.stream_index]);
4231 /* the following test is needed in case new streams appear
4232 dynamically in stream : we ignore them */
4233 if (pkt.stream_index >= ifile->nb_streams) {
4234 report_new_stream(file_index, &pkt);
4235 goto discard_packet;
4238 ist = input_streams[ifile->ist_index + pkt.stream_index];
4240 ist->data_size += pkt.size;
/* Discarded streams skip all further processing (guard on elided line). */
4244 goto discard_packet;
4246 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4247 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4252 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4253 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4254 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4255 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4256 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4257 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4258 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4259 av_ts2str(input_files[ist->file_index]->ts_offset),
4260 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* One-time pts wrap correction for formats with < 64 pts_wrap_bits. */
4263 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4264 int64_t stime, stime2;
4265 // Correcting starttime based on the enabled streams
4266 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4267 // so we instead do it here as part of discontinuity handling
4268 if ( ist->next_dts == AV_NOPTS_VALUE
4269 && ifile->ts_offset == -is->start_time
4270 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4271 int64_t new_start_time = INT64_MAX;
4272 for (i=0; i<is->nb_streams; i++) {
4273 AVStream *st = is->streams[i];
4274 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4276 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4278 if (new_start_time > is->start_time) {
4279 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4280 ifile->ts_offset = -new_start_time;
/* If a timestamp lies more than half the wrap range past start, assume it
 * wrapped and unwrap it; keep correcting until timestamps stabilize. */
4284 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4285 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4286 ist->wrap_correction_done = 1;
4288 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4289 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4290 ist->wrap_correction_done = 0;
4292 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4293 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4294 ist->wrap_correction_done = 0;
4298 /* add the stream-global side data to the first packet */
4299 if (ist->nb_packets == 1) {
4300 for (i = 0; i < ist->st->nb_side_data; i++) {
4301 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate, and existing per-packet side
 * data wins; both cases skip the copy (skip statements elided). */
4304 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4307 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4310 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4314 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the per-file input timestamp offset (-itsoffset / start-time
 * compensation), then the per-stream -itsscale factor. */
4318 if (pkt.dts != AV_NOPTS_VALUE)
4319 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4320 if (pkt.pts != AV_NOPTS_VALUE)
4321 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4323 if (pkt.pts != AV_NOPTS_VALUE)
4324 pkt.pts *= ist->ts_scale;
4325 if (pkt.dts != AV_NOPTS_VALUE)
4326 pkt.dts *= ist->ts_scale;
/* First packet of a stream in a TS_DISCONT format: detect an inter-stream
 * discontinuity against the file's last seen timestamp and fold it into
 * ts_offset. */
4328 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4329 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4330 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4331 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4332 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4333 int64_t delta = pkt_dts - ifile->last_ts;
4334 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4335 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4336 ifile->ts_offset -= delta;
4337 av_log(NULL, AV_LOG_DEBUG,
4338 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4339 delta, ifile->ts_offset);
4340 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4341 if (pkt.pts != AV_NOPTS_VALUE)
4342 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration. */
4346 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4347 if (pkt.pts != AV_NOPTS_VALUE) {
4348 pkt.pts += duration;
4349 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4350 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4353 if (pkt.dts != AV_NOPTS_VALUE)
4354 pkt.dts += duration;
/* Intra-stream discontinuity handling against the predicted next_dts:
 * TS_DISCONT formats get their ts_offset adjusted; otherwise wildly
 * off timestamps (beyond dts_error_threshold) are dropped. */
4356 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4357 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4358 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4359 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4361 int64_t delta = pkt_dts - ist->next_dts;
4362 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4363 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4364 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4365 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4366 ifile->ts_offset -= delta;
4367 av_log(NULL, AV_LOG_DEBUG,
4368 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4369 delta, ifile->ts_offset);
4370 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4371 if (pkt.pts != AV_NOPTS_VALUE)
4372 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4375 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4376 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4377 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4378 pkt.dts = AV_NOPTS_VALUE;
4380 if (pkt.pts != AV_NOPTS_VALUE){
4381 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4382 delta = pkt_pts - ist->next_dts;
4383 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4384 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4385 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4386 pkt.pts = AV_NOPTS_VALUE;
/* Remember last dts of this file for the inter-stream check above. */
4392 if (pkt.dts != AV_NOPTS_VALUE)
4393 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4396 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4397 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4398 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4399 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4400 av_ts2str(input_files[ist->file_index]->ts_offset),
4401 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4404 sub2video_heartbeat(ist, pkt.pts);
4406 process_input_packet(ist, &pkt, 0);
/* discard_packet: label target for ignored packets (label line elided). */
4409 av_packet_unref(&pkt);
4415 * Perform a step of transcoding for the specified filter graph.
4417 * @param[in] graph filter graph to consider
4418 * @param[out] best_ist input stream where a frame would allow to continue
4419 * @return 0 for success, <0 for error
/* Asks the graph for its oldest pending frame; on success reaps filter
 * output, on EOF closes every output of the graph, and on EAGAIN picks the
 * input filter with the most failed requests so the caller knows which
 * input stream to feed next (assignment to *best_ist is on elided lines). */
4421 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4424 int nb_requests, nb_requests_max = 0;
4425 InputFilter *ifilter;
4429 ret = avfilter_graph_request_oldest(graph->graph);
4431 return reap_filters(0);
4433 if (ret == AVERROR_EOF) {
4434 ret = reap_filters(1);
4435 for (i = 0; i < graph->nb_outputs; i++)
4436 close_output_stream(graph->outputs[i]->ost);
4439 if (ret != AVERROR(EAGAIN))
/* EAGAIN path: find the most starved input whose file can still deliver. */
4442 for (i = 0; i < graph->nb_inputs; i++) {
4443 ifilter = graph->inputs[i];
4445 if (input_files[ist->file_index]->eagain ||
4446 input_files[ist->file_index]->eof_reached)
4448 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4449 if (nb_requests > nb_requests_max) {
4450 nb_requests_max = nb_requests;
/* No feedable input found: mark the graph's outputs unavailable for now. */
4456 for (i = 0; i < graph->nb_outputs; i++)
4457 graph->outputs[i]->ost->unavailable = 1;
4463 * Run a single step of transcoding.
4465 * @return 0 for success, <0 for error
/* Chooses the neediest output stream, (re)configures its filtergraph once
 * all input formats are known, and either pulls from the graph or reads one
 * more packet from the chosen input file; ends by reaping filter output. */
4467 static int transcode_step(void)
4470 InputStream *ist = NULL;
4473 ost = choose_output();
/* No selectable output left: declare the run finished (return elided). */
4480 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Graph exists but is not configured yet: configure as soon as every
 * input filter knows its format. */
4484 if (ost->filter && !ost->filter->graph->graph) {
4485 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4486 ret = configure_filtergraph(ost->filter->graph);
4488 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4494 if (ost->filter && ost->filter->graph->graph) {
4495 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4499 } else if (ost->filter) {
/* Unconfigured graph: look for an input that may still deliver frames. */
4501 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4502 InputFilter *ifilter = ost->filter->graph->inputs[i];
4503 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
/* All inputs exhausted without output: mark the stream's inputs done. */
4509 ost->inputs_done = 1;
/* No filter: feed directly from the mapped source input stream. */
4513 av_assert0(ost->source_index >= 0);
4514 ist = input_streams[ost->source_index];
4517 ret = process_input(ist->file_index);
4518 if (ret == AVERROR(EAGAIN)) {
4519 if (input_files[ist->file_index]->eagain)
4520 ost->unavailable = 1;
4525 return ret == AVERROR_EOF ? 0 : ret;
4527 return reap_filters(0);
4531 * The following code is the main loop of the file converter
/* Drives the whole conversion: transcode_init(), the step loop (with
 * keyboard interaction and progress reports), then teardown — decoder
 * flushing, trailer writing, final report, encoder/decoder close and
 * per-output-stream cleanup. Returns 0 or a negative AVERROR.
 * NOTE(review): 'fail:' style labels, several declarations (ret, i, ist,
 * ost, errbuf) and some loop braces are on elided lines. */
4533 static int transcode(void)
4536 AVFormatContext *os;
4539 int64_t timer_start;
4540 int64_t total_packets_written = 0;
4542 ret = transcode_init();
4546 if (stdin_interaction) {
4547 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4550 timer_start = av_gettime_relative();
4553 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until a termination signal or no output is needed. */
4557 while (!received_sigterm) {
4558 int64_t cur_time= av_gettime_relative();
4560 /* if 'q' pressed, exits */
4561 if (stdin_interaction)
4562 if (check_keyboard_interaction(cur_time) < 0)
4565 /* check if there's any stream where output is still needed */
4566 if (!need_output()) {
4567 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4571 ret = transcode_step();
4572 if (ret < 0 && ret != AVERROR_EOF) {
4574 av_strerror(ret, errbuf, sizeof(errbuf));
4576 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4580 /* dump report by using the output first video and audio streams */
4581 print_report(0, timer_start, cur_time);
4584 free_input_threads();
4587 /* at the end of stream, we must flush the decoder buffers */
4588 for (i = 0; i < nb_input_streams; i++) {
4589 ist = input_streams[i];
4590 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4591 process_input_packet(ist, NULL, 0);
4598 /* write the trailer if needed and close file */
4599 for (i = 0; i < nb_output_files; i++) {
4600 os = output_files[i]->ctx;
4601 if (!output_files[i]->header_written) {
4602 av_log(NULL, AV_LOG_ERROR,
4603 "Nothing was written into output file %d (%s), because "
4604 "at least one of its streams received no packets.\n",
4608 if ((ret = av_write_trailer(os)) < 0) {
4609 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4615 /* dump report by using the first video and audio streams */
4616 print_report(1, timer_start, av_gettime_relative());
4618 /* close each encoder */
4619 for (i = 0; i < nb_output_streams; i++) {
4620 ost = output_streams[i];
4621 if (ost->encoding_needed) {
4622 av_freep(&ost->enc_ctx->stats_in);
4624 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard when nothing was produced at all. */
4627 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4628 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4632 /* close each decoder */
4633 for (i = 0; i < nb_input_streams; i++) {
4634 ist = input_streams[i];
4635 if (ist->decoding_needed) {
4636 avcodec_close(ist->dec_ctx);
4637 if (ist->hwaccel_uninit)
4638 ist->hwaccel_uninit(ist->dec_ctx);
4642 av_buffer_unref(&hw_device_ctx);
/* Error-path / shared cleanup (label on an elided line). */
4649 free_input_threads();
4652 if (output_streams) {
4653 for (i = 0; i < nb_output_streams; i++) {
4654 ost = output_streams[i];
/* Two-pass log files: flushing may lose data, so report fclose errors. */
4657 if (fclose(ost->logfile))
4658 av_log(NULL, AV_LOG_ERROR,
4659 "Error closing logfile, loss of information possible: %s\n",
4660 av_err2str(AVERROR(errno)));
4661 ost->logfile = NULL;
4663 av_freep(&ost->forced_kf_pts);
4664 av_freep(&ost->apad);
4665 av_freep(&ost->disposition);
4666 av_dict_free(&ost->encoder_opts);
4667 av_dict_free(&ost->sws_dict);
4668 av_dict_free(&ost->swr_opts);
4669 av_dict_free(&ost->resample_opts);
4677 static int64_t getutime(void)
4680 struct rusage rusage;
4682 getrusage(RUSAGE_SELF, &rusage);
4683 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4684 #elif HAVE_GETPROCESSTIMES
4686 FILETIME c, e, k, u;
4687 proc = GetCurrentProcess();
4688 GetProcessTimes(proc, &c, &e, &k, &u);
4689 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4691 return av_gettime_relative();
4695 static int64_t getmaxrss(void)
4697 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4698 struct rusage rusage;
4699 getrusage(RUSAGE_SELF, &rusage);
4700 return (int64_t)rusage.ru_maxrss * 1024;
4701 #elif HAVE_GETPROCESSMEMORYINFO
4703 PROCESS_MEMORY_COUNTERS memcounters;
4704 proc = GetCurrentProcess();
4705 memcounters.cb = sizeof(memcounters);
4706 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4707 return memcounters.PeakPagefileUsage;
/* No-op av_log callback installed when running under a debugger ("-d"):
 * discards all log output. Parameters match the av_log_set_callback()
 * signature and are intentionally unused. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4717 int main(int argc, char **argv)
4724 register_exit(ffmpeg_cleanup);
4726 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4728 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4729 parse_loglevel(argc, argv, options);
4731 if(argc>1 && !strcmp(argv[1], "-d")){
4733 av_log_set_callback(log_callback_null);
4738 avcodec_register_all();
4740 avdevice_register_all();
4742 avfilter_register_all();
4744 avformat_network_init();
4746 show_banner(argc, argv, options);
4748 /* parse options and open all input/output files */
4749 ret = ffmpeg_parse_options(argc, argv);
4753 if (nb_output_files <= 0 && nb_input_files == 0) {
4755 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4759 /* file converter / grab */
4760 if (nb_output_files <= 0) {
4761 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4765 // if (nb_input_files == 0) {
4766 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4770 for (i = 0; i < nb_output_files; i++) {
4771 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4775 current_time = ti = getutime();
4776 if (transcode() < 0)
4778 ti = getutime() - ti;
4780 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4782 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4783 decode_error_stat[0], decode_error_stat[1]);
4784 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4787 exit_program(received_nb_signals ? 255 : main_return_code);
4788 return main_return_code;