/*
 * Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * multimedia converter based on the FFmpeg libraries
 */
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
73 #if HAVE_SYS_RESOURCE_H
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
80 #if HAVE_GETPROCESSMEMORYINFO
84 #if HAVE_SETCONSOLECTRLHANDLER
90 #include <sys/select.h>
95 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 typedef struct BenchmarkTimeStamps {
127 } BenchmarkTimeStamps;
129 static void do_video_stats(OutputStream *ost, int frame_size);
130 static BenchmarkTimeStamps get_benchmark_time_stamps(void);
131 static int64_t getmaxrss(void);
132 static int ifilter_has_all_input_formats(FilterGraph *fg);
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
140 static int want_sdp = 1;
142 static BenchmarkTimeStamps current_time;
143 AVIOContext *progress_avio = NULL;
145 static uint8_t *subtitle_out;
147 InputStream **input_streams = NULL;
148 int nb_input_streams = 0;
149 InputFile **input_files = NULL;
150 int nb_input_files = 0;
152 OutputStream **output_streams = NULL;
153 int nb_output_streams = 0;
154 OutputFile **output_files = NULL;
155 int nb_output_files = 0;
157 FilterGraph **filtergraphs;
/* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
168 static void free_input_threads(void);
/*
 * Convert subtitles to video with alpha to insert them in filter graphs.
 * This is a temporary solution until libavfilter gets real subtitles support.
 */
176 static int sub2video_get_blank_frame(InputStream *ist)
179 AVFrame *frame = ist->sub2video.frame;
181 av_frame_unref(frame);
182 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
184 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
185 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
187 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
194 uint32_t *pal, *dst2;
198 if (r->type != SUBTITLE_BITMAP) {
199 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
202 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204 r->x, r->y, r->w, r->h, w, h
209 dst += r->y * dst_linesize + r->x * 4;
211 pal = (uint32_t *)r->data[1];
212 for (y = 0; y < r->h; y++) {
213 dst2 = (uint32_t *)dst;
215 for (x = 0; x < r->w; x++)
216 *(dst2++) = pal[*(src2++)];
218 src += r->linesize[0];
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 AVFrame *frame = ist->sub2video.frame;
228 av_assert1(frame->data[0]);
229 ist->sub2video.last_pts = frame->pts = pts;
230 for (i = 0; i < ist->nb_filters; i++) {
231 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
232 AV_BUFFERSRC_FLAG_KEEP_REF |
233 AV_BUFFERSRC_FLAG_PUSH);
234 if (ret != AVERROR_EOF && ret < 0)
235 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
240 void sub2video_update(InputStream *ist, AVSubtitle *sub)
242 AVFrame *frame = ist->sub2video.frame;
246 int64_t pts, end_pts;
251 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252 AV_TIME_BASE_Q, ist->st->time_base);
253 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254 AV_TIME_BASE_Q, ist->st->time_base);
255 num_rects = sub->num_rects;
257 pts = ist->sub2video.end_pts;
261 if (sub2video_get_blank_frame(ist) < 0) {
262 av_log(ist->dec_ctx, AV_LOG_ERROR,
263 "Impossible to get a blank canvas.\n");
266 dst = frame->data [0];
267 dst_linesize = frame->linesize[0];
268 for (i = 0; i < num_rects; i++)
269 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270 sub2video_push_ref(ist, pts);
271 ist->sub2video.end_pts = end_pts;
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
276 InputFile *infile = input_files[ist->file_index];
280 /* When a frame is read from a file, examine all sub2video streams in
281 the same file and send the sub2video frame again. Otherwise, decoded
282 video frames could be accumulating in the filter graph while a filter
283 (possibly overlay) is desperately waiting for a subtitle frame. */
284 for (i = 0; i < infile->nb_streams; i++) {
285 InputStream *ist2 = input_streams[infile->ist_index + i];
286 if (!ist2->sub2video.frame)
288 /* subtitles seem to be usually muxed ahead of other streams;
289 if not, subtracting a larger time here is necessary */
290 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291 /* do not send the heartbeat frame if the subtitle is already ahead */
292 if (pts2 <= ist2->sub2video.last_pts)
294 if (pts2 >= ist2->sub2video.end_pts ||
295 (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296 sub2video_update(ist2, NULL);
297 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300 sub2video_push_ref(ist2, pts2);
304 static void sub2video_flush(InputStream *ist)
309 if (ist->sub2video.end_pts < INT64_MAX)
310 sub2video_update(ist, NULL);
311 for (i = 0; i < ist->nb_filters; i++) {
312 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313 if (ret != AVERROR_EOF && ret < 0)
314 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
/* end of sub2video hack */
320 static void term_exit_sigsafe(void)
324 tcsetattr (0, TCSANOW, &oldtty);
330 av_log(NULL, AV_LOG_QUIET, "%s", "");
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
336 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
341 sigterm_handler(int sig)
344 received_sigterm = sig;
345 received_nb_signals++;
347 if(received_nb_signals > 3) {
348 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349 strlen("Received > 3 system signals, hard exiting\n"));
350 if (ret < 0) { /* Do nothing */ };
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events to POSIX-style signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(100);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
390 if (!run_as_daemon && stdin_interaction) {
392 if (tcgetattr (0, &tty) == 0) {
396 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397 |INLCR|IGNCR|ICRNL|IXON);
398 tty.c_oflag |= OPOST;
399 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400 tty.c_cflag &= ~(CSIZE|PARENB);
405 tcsetattr (0, TCSANOW, &tty);
407 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
411 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
414 signal(SIGXCPU, sigterm_handler);
417 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
419 #if HAVE_SETCONSOLECTRLHANDLER
420 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* Returns the key code, 0/-1 when no key is available, or a platform-specific
 * negative value on pipe errors.
 * NOTE(review): reconstructed from a mangled listing; verify the kbhit/pipe
 * branches against upstream. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if (nchars != 0) {
            read(0, &ch, 1);
            return ch;
        } else {
            return -1;
        }
    }
#    endif
    if (kbhit())
        return(getch());
#endif
    return -1;
}
476 static int decode_interrupt_cb(void *ctx)
478 return received_nb_signals > atomic_load(&transcode_init_done);
481 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
483 static void ffmpeg_cleanup(int ret)
488 int maxrss = getmaxrss() / 1024;
489 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
492 for (i = 0; i < nb_filtergraphs; i++) {
493 FilterGraph *fg = filtergraphs[i];
494 avfilter_graph_free(&fg->graph);
495 for (j = 0; j < fg->nb_inputs; j++) {
496 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
498 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499 sizeof(frame), NULL);
500 av_frame_free(&frame);
502 av_fifo_freep(&fg->inputs[j]->frame_queue);
503 if (fg->inputs[j]->ist->sub2video.sub_queue) {
504 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
506 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
507 &sub, sizeof(sub), NULL);
508 avsubtitle_free(&sub);
510 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
512 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
513 av_freep(&fg->inputs[j]->name);
514 av_freep(&fg->inputs[j]);
516 av_freep(&fg->inputs);
517 for (j = 0; j < fg->nb_outputs; j++) {
518 av_freep(&fg->outputs[j]->name);
519 av_freep(&fg->outputs[j]->formats);
520 av_freep(&fg->outputs[j]->channel_layouts);
521 av_freep(&fg->outputs[j]->sample_rates);
522 av_freep(&fg->outputs[j]);
524 av_freep(&fg->outputs);
525 av_freep(&fg->graph_desc);
527 av_freep(&filtergraphs[i]);
529 av_freep(&filtergraphs);
531 av_freep(&subtitle_out);
534 for (i = 0; i < nb_output_files; i++) {
535 OutputFile *of = output_files[i];
540 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
542 avformat_free_context(s);
543 av_dict_free(&of->opts);
545 av_freep(&output_files[i]);
547 for (i = 0; i < nb_output_streams; i++) {
548 OutputStream *ost = output_streams[i];
553 for (j = 0; j < ost->nb_bitstream_filters; j++)
554 av_bsf_free(&ost->bsf_ctx[j]);
555 av_freep(&ost->bsf_ctx);
557 av_frame_free(&ost->filtered_frame);
558 av_frame_free(&ost->last_frame);
559 av_dict_free(&ost->encoder_opts);
561 av_freep(&ost->forced_keyframes);
562 av_expr_free(ost->forced_keyframes_pexpr);
563 av_freep(&ost->avfilter);
564 av_freep(&ost->logfile_prefix);
566 av_freep(&ost->audio_channels_map);
567 ost->audio_channels_mapped = 0;
569 av_dict_free(&ost->sws_dict);
571 avcodec_free_context(&ost->enc_ctx);
572 avcodec_parameters_free(&ost->ref_par);
574 if (ost->muxing_queue) {
575 while (av_fifo_size(ost->muxing_queue)) {
577 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
578 av_packet_unref(&pkt);
580 av_fifo_freep(&ost->muxing_queue);
583 av_freep(&output_streams[i]);
586 free_input_threads();
588 for (i = 0; i < nb_input_files; i++) {
589 avformat_close_input(&input_files[i]->ctx);
590 av_freep(&input_files[i]);
592 for (i = 0; i < nb_input_streams; i++) {
593 InputStream *ist = input_streams[i];
595 av_frame_free(&ist->decoded_frame);
596 av_frame_free(&ist->filter_frame);
597 av_dict_free(&ist->decoder_opts);
598 avsubtitle_free(&ist->prev_sub.subtitle);
599 av_frame_free(&ist->sub2video.frame);
600 av_freep(&ist->filters);
601 av_freep(&ist->hwaccel_device);
602 av_freep(&ist->dts_buffer);
604 avcodec_free_context(&ist->dec_ctx);
606 av_freep(&input_streams[i]);
610 if (fclose(vstats_file))
611 av_log(NULL, AV_LOG_ERROR,
612 "Error closing vstats file, loss of information possible: %s\n",
613 av_err2str(AVERROR(errno)));
615 av_freep(&vstats_filename);
617 av_freep(&input_streams);
618 av_freep(&input_files);
619 av_freep(&output_streams);
620 av_freep(&output_files);
624 avformat_network_deinit();
626 if (received_sigterm) {
627 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
628 (int) received_sigterm);
629 } else if (ret && atomic_load(&transcode_init_done)) {
630 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
636 void remove_avoptions(AVDictionary **a, AVDictionary *b)
638 AVDictionaryEntry *t = NULL;
640 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
641 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
645 void assert_avoptions(AVDictionary *m)
647 AVDictionaryEntry *t;
648 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
649 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
654 static void abort_codec_experimental(AVCodec *c, int encoder)
659 static void update_benchmark(const char *fmt, ...)
661 if (do_benchmark_all) {
662 BenchmarkTimeStamps t = get_benchmark_time_stamps();
668 vsnprintf(buf, sizeof(buf), fmt, va);
670 av_log(NULL, AV_LOG_INFO,
671 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
672 t.user_usec - current_time.user_usec,
673 t.sys_usec - current_time.sys_usec,
674 t.real_usec - current_time.real_usec, buf);
680 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
683 for (i = 0; i < nb_output_streams; i++) {
684 OutputStream *ost2 = output_streams[i];
685 ost2->finished |= ost == ost2 ? this_stream : others;
689 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 AVFormatContext *s = of->ctx;
692 AVStream *st = ost->st;
696 * Audio encoders may split the packets -- #frames in != #packets out.
697 * But there is no reordering, so we can limit the number of output packets
698 * by simply dropping them here.
699 * Counting encoded video frames needs to be done separately because of
700 * reordering, see do_video_out().
701 * Do not count the packet when unqueued because it has been counted when queued.
703 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
704 if (ost->frame_number >= ost->max_frames) {
705 av_packet_unref(pkt);
711 if (!of->header_written) {
712 AVPacket tmp_pkt = {0};
713 /* the muxer is not initialized yet, buffer the packet */
714 if (!av_fifo_space(ost->muxing_queue)) {
715 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
716 ost->max_muxing_queue_size);
717 if (new_size <= av_fifo_size(ost->muxing_queue)) {
718 av_log(NULL, AV_LOG_ERROR,
719 "Too many packets buffered for output stream %d:%d.\n",
720 ost->file_index, ost->st->index);
723 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
727 ret = av_packet_ref(&tmp_pkt, pkt);
730 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
731 av_packet_unref(pkt);
735 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
736 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
737 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
743 ost->quality = sd ? AV_RL32(sd) : -1;
744 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748 ost->error[i] = AV_RL64(sd + 8 + 8*i);
753 if (ost->frame_rate.num && ost->is_cfr) {
754 if (pkt->duration > 0)
755 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
756 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
761 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
764 if (pkt->dts != AV_NOPTS_VALUE &&
765 pkt->pts != AV_NOPTS_VALUE &&
766 pkt->dts > pkt->pts) {
767 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769 ost->file_index, ost->st->index);
771 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
772 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
773 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
776 pkt->dts != AV_NOPTS_VALUE &&
777 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
778 ost->last_mux_dts != AV_NOPTS_VALUE) {
779 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
780 if (pkt->dts < max) {
781 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
782 av_log(s, loglevel, "Non-monotonous DTS in output stream "
783 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
784 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
789 av_log(s, loglevel, "changing to %"PRId64". This may result "
790 "in incorrect timestamps in the output file.\n",
792 if (pkt->pts >= pkt->dts)
793 pkt->pts = FFMAX(pkt->pts, max);
798 ost->last_mux_dts = pkt->dts;
800 ost->data_size += pkt->size;
801 ost->packets_written++;
803 pkt->stream_index = ost->index;
806 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
807 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
808 av_get_media_type_string(ost->enc_ctx->codec_type),
809 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
810 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
815 ret = av_interleaved_write_frame(s, pkt);
817 print_error("av_interleaved_write_frame()", ret);
818 main_return_code = 1;
819 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
821 av_packet_unref(pkt);
824 static void close_output_stream(OutputStream *ost)
826 OutputFile *of = output_files[ost->file_index];
828 ost->finished |= ENCODER_FINISHED;
830 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
831 of->recording_time = FFMIN(of->recording_time, end);
836 * Send a single packet to the output, applying any bitstream filters
837 * associated with the output stream. This may result in any number
838 * of packets actually being written, depending on what bitstream
839 * filters are applied. The supplied packet is consumed and will be
840 * blank (as if newly-allocated) when this function returns.
842 * If eof is set, instead indicate EOF to all bitstream filters and
843 * therefore flush any delayed packets to the output. A blank packet
844 * must be supplied in this case.
846 static void output_packet(OutputFile *of, AVPacket *pkt,
847 OutputStream *ost, int eof)
851 /* apply the output bitstream filters, if any */
852 if (ost->nb_bitstream_filters) {
855 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
862 /* get a packet from the previous filter up the chain */
863 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
864 if (ret == AVERROR(EAGAIN)) {
868 } else if (ret == AVERROR_EOF) {
873 /* send it to the next filter down the chain or to the muxer */
874 if (idx < ost->nb_bitstream_filters) {
875 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
883 write_packet(of, pkt, ost, 0);
886 write_packet(of, pkt, ost, 0);
889 if (ret < 0 && ret != AVERROR_EOF) {
890 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
891 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
897 static int check_recording_time(OutputStream *ost)
899 OutputFile *of = output_files[ost->file_index];
901 if (of->recording_time != INT64_MAX &&
902 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
903 AV_TIME_BASE_Q) >= 0) {
904 close_output_stream(ost);
910 static void do_audio_out(OutputFile *of, OutputStream *ost,
913 AVCodecContext *enc = ost->enc_ctx;
917 av_init_packet(&pkt);
921 if (!check_recording_time(ost))
924 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
925 frame->pts = ost->sync_opts;
926 ost->sync_opts = frame->pts + frame->nb_samples;
927 ost->samples_encoded += frame->nb_samples;
928 ost->frames_encoded++;
930 av_assert0(pkt.size || !pkt.data);
931 update_benchmark(NULL);
933 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
934 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
935 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
936 enc->time_base.num, enc->time_base.den);
939 ret = avcodec_send_frame(enc, frame);
944 ret = avcodec_receive_packet(enc, &pkt);
945 if (ret == AVERROR(EAGAIN))
950 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
955 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
956 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
957 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
958 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
961 output_packet(of, &pkt, ost, 0);
966 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
970 static void do_subtitle_out(OutputFile *of,
974 int subtitle_out_max_size = 1024 * 1024;
975 int subtitle_out_size, nb, i;
980 if (sub->pts == AV_NOPTS_VALUE) {
981 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
990 subtitle_out = av_malloc(subtitle_out_max_size);
992 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
997 /* Note: DVB subtitle need one packet to draw them and one other
998 packet to clear them */
999 /* XXX: signal it in the codec context ? */
1000 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1005 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1008 pts -= output_files[ost->file_index]->start_time;
1009 for (i = 0; i < nb; i++) {
1010 unsigned save_num_rects = sub->num_rects;
1012 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1013 if (!check_recording_time(ost))
1017 // start_display_time is required to be 0
1018 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1019 sub->end_display_time -= sub->start_display_time;
1020 sub->start_display_time = 0;
1024 ost->frames_encoded++;
1026 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1027 subtitle_out_max_size, sub);
1029 sub->num_rects = save_num_rects;
1030 if (subtitle_out_size < 0) {
1031 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1035 av_init_packet(&pkt);
1036 pkt.data = subtitle_out;
1037 pkt.size = subtitle_out_size;
1038 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1039 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1040 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1041 /* XXX: the pts correction is handled here. Maybe handling
1042 it in the codec would be better */
1044 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1049 output_packet(of, &pkt, ost, 0);
1053 static void do_video_out(OutputFile *of,
1055 AVFrame *next_picture,
1058 int ret, format_video_sync;
1060 AVCodecContext *enc = ost->enc_ctx;
1061 AVCodecParameters *mux_par = ost->st->codecpar;
1062 AVRational frame_rate;
1063 int nb_frames, nb0_frames, i;
1064 double delta, delta0;
1065 double duration = 0;
1067 InputStream *ist = NULL;
1068 AVFilterContext *filter = ost->filter->filter;
1070 if (ost->source_index >= 0)
1071 ist = input_streams[ost->source_index];
1073 frame_rate = av_buffersink_get_frame_rate(filter);
1074 if (frame_rate.num > 0 && frame_rate.den > 0)
1075 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1078 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 if (!ost->filters_script &&
1084 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1085 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088 if (!next_picture) {
1090 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1091 ost->last_nb0_frames[1],
1092 ost->last_nb0_frames[2]);
1094 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1095 delta = delta0 + duration;
1097 /* by default, we output a single frame */
1098 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101 format_video_sync = video_sync_method;
1102 if (format_video_sync == VSYNC_AUTO) {
1103 if(!strcmp(of->ctx->oformat->name, "avi")) {
1104 format_video_sync = VSYNC_VFR;
1106 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108 && format_video_sync == VSYNC_CFR
1109 && input_files[ist->file_index]->ctx->nb_streams == 1
1110 && input_files[ist->file_index]->input_ts_offset == 0) {
1111 format_video_sync = VSYNC_VSCFR;
1113 if (format_video_sync == VSYNC_CFR && copy_ts) {
1114 format_video_sync = VSYNC_VSCFR;
1117 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1121 format_video_sync != VSYNC_PASSTHROUGH &&
1122 format_video_sync != VSYNC_DROP) {
1123 if (delta0 < -0.6) {
1124 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1127 sync_ipts = ost->sync_opts;
1132 switch (format_video_sync) {
1134 if (ost->frame_number == 0 && delta0 >= 0.5) {
1135 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138 ost->sync_opts = lrint(sync_ipts);
1141 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1142 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144 } else if (delta < -1.1)
1146 else if (delta > 1.1) {
1147 nb_frames = lrintf(delta);
1149 nb0_frames = lrintf(delta0 - 0.6);
1155 else if (delta > 0.6)
1156 ost->sync_opts = lrint(sync_ipts);
1159 case VSYNC_PASSTHROUGH:
1160 ost->sync_opts = lrint(sync_ipts);
1167 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1168 nb0_frames = FFMIN(nb0_frames, nb_frames);
1170 memmove(ost->last_nb0_frames + 1,
1171 ost->last_nb0_frames,
1172 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1173 ost->last_nb0_frames[0] = nb0_frames;
1175 if (nb0_frames == 0 && ost->last_dropped) {
1177 av_log(NULL, AV_LOG_VERBOSE,
1178 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1179 ost->frame_number, ost->st->index, ost->last_frame->pts);
1181 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1182 if (nb_frames > dts_error_threshold * 30) {
1183 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1187 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1188 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1189 if (nb_frames_dup > dup_warning) {
1190 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1194 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 /* duplicates frame if needed */
1197 for (i = 0; i < nb_frames; i++) {
1198 AVFrame *in_picture;
1199 av_init_packet(&pkt);
1203 if (i < nb0_frames && ost->last_frame) {
1204 in_picture = ost->last_frame;
1206 in_picture = next_picture;
1211 in_picture->pts = ost->sync_opts;
1214 if (!check_recording_time(ost))
1216 if (ost->frame_number >= ost->max_frames)
1221 int forced_keyframe = 0;
1224 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1225 ost->top_field_first >= 0)
1226 in_picture->top_field_first = !!ost->top_field_first;
1228 if (in_picture->interlaced_frame) {
1229 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 in_picture->quality = enc->global_quality;
1237 in_picture->pict_type = 0;
1239 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1240 in_picture->pts != AV_NOPTS_VALUE)
1241 ost->forced_kf_ref_pts = in_picture->pts;
1243 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1244 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1245 if (ost->forced_kf_index < ost->forced_kf_count &&
1246 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1247 ost->forced_kf_index++;
1248 forced_keyframe = 1;
1249 } else if (ost->forced_keyframes_pexpr) {
1251 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1252 res = av_expr_eval(ost->forced_keyframes_pexpr,
1253 ost->forced_keyframes_expr_const_values, NULL);
1254 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1255 ost->forced_keyframes_expr_const_values[FKF_N],
1256 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1257 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1258 ost->forced_keyframes_expr_const_values[FKF_T],
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1262 forced_keyframe = 1;
1263 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1264 ost->forced_keyframes_expr_const_values[FKF_N];
1265 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1266 ost->forced_keyframes_expr_const_values[FKF_T];
1267 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1270 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1271 } else if ( ost->forced_keyframes
1272 && !strncmp(ost->forced_keyframes, "source", 6)
1273 && in_picture->key_frame==1) {
1274 forced_keyframe = 1;
1277 if (forced_keyframe) {
1278 in_picture->pict_type = AV_PICTURE_TYPE_I;
1279 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1282 update_benchmark(NULL);
1284 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1285 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1286 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1287 enc->time_base.num, enc->time_base.den);
1290 ost->frames_encoded++;
1292 ret = avcodec_send_frame(enc, in_picture);
1297 ret = avcodec_receive_packet(enc, &pkt);
1298 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1299 if (ret == AVERROR(EAGAIN))
1305 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1308 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1311 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1312 pkt.pts = ost->sync_opts;
1314 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1317 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1318 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1319 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1320 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1323 frame_size = pkt.size;
1324 output_packet(of, &pkt, ost, 0);
1326 /* if two pass, output log */
1327 if (ost->logfile && enc->stats_out) {
1328 fprintf(ost->logfile, "%s", enc->stats_out);
1334 * For video, number of frames in == number of packets out.
1335 * But there may be reordering, so we can't throw away frames on encoder
1336 * flush, we need to limit them here, before they go into encoder.
1338 ost->frame_number++;
1340 if (vstats_filename && frame_size)
1341 do_video_stats(ost, frame_size);
1344 if (!ost->last_frame)
1345 ost->last_frame = av_frame_alloc();
1346 av_frame_unref(ost->last_frame);
1347 if (next_picture && ost->last_frame)
1348 av_frame_ref(ost->last_frame, next_picture);
1350 av_frame_free(&ost->last_frame);
1354 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1358 static double psnr(double d)
1360 return -10.0 * log10(d);
/*
 * Append one line of per-frame statistics for a video output stream to
 * the global vstats file (opened lazily on the first call).
 * NOTE(review): this listing is non-contiguous (original line numbers
 * skip); comments describe only the visible code — confirm against the
 * full source before relying on them.
 */
1363 static void do_video_stats(OutputStream *ost, int frame_size)
1365     AVCodecContext *enc;
1367     double ti1, bitrate, avg_bitrate;
1369     /* this is executed just the first time do_video_stats is called */
1371         vstats_file = fopen(vstats_filename, "w");
1379     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1380         frame_number = ost->st->nb_frames;
// vstats_version selects between the legacy and extended line formats.
1381         if (vstats_version <= 1) {
1382             fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1383                     ost->quality / (float)FF_QP2LAMBDA);
1385             fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1386                     ost->quality / (float)FF_QP2LAMBDA);
// PSNR is only meaningful when the encoder collected error stats.
1389         if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1390             fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1392         fprintf(vstats_file,"f_size= %6d ", frame_size);
1393         /* compute pts value */
1394         ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
// Instantaneous bitrate from this frame; average from total bytes / time.
1398         bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1399         avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1400         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1401                 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1402         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1406 static int init_output_stream(OutputStream *ost, char *error, int error_len);
/*
 * Mark an output stream (and, in the visible code path, every stream of
 * its owning output file) as fully finished for both encoder and muxer.
 * NOTE(review): original lines 1414-1415 are missing from this listing,
 * so the loop below may be guarded by a condition not visible here —
 * confirm against the full source.
 */
1408 static void finish_output_stream(OutputStream *ost)
1410     OutputFile *of = output_files[ost->file_index];
1413     ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1416     for (i = 0; i < of->ctx->nb_streams; i++)
1417         output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1422  * Get and encode new output from any of the filtergraphs, without causing
1425  * @return 0 for success, <0 for severe errors
/*
 * NOTE(review): the listing is non-contiguous here; control-flow joins
 * (continue/break statements, closing braces) are not all visible.
 */
1427 static int reap_filters(int flush)
1429     AVFrame *filtered_frame = NULL;
1432     /* Reap all buffers present in the buffer sinks */
1433     for (i = 0; i < nb_output_streams; i++) {
1434         OutputStream *ost = output_streams[i];
1435         OutputFile    *of = output_files[ost->file_index];
1436         AVFilterContext *filter;
1437         AVCodecContext *enc = ost->enc_ctx;
// Skip streams whose filtergraph is not (yet) configured.
1440         if (!ost->filter || !ost->filter->graph->graph)
1442         filter = ost->filter->filter;
// Lazily initialize the output stream the first time it produces data.
1444         if (!ost->initialized) {
1445             char error[1024] = "";
1446             ret = init_output_stream(ost, error, sizeof(error));
1448                 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1449                        ost->file_index, ost->index, error);
1454         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1455             return AVERROR(ENOMEM);
1457         filtered_frame = ost->filtered_frame;
1460             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
// Pull frames already buffered in the sink without requesting new ones.
1461             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1462                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
1464                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1465                     av_log(NULL, AV_LOG_WARNING,
1466                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1467                 } else if (flush && ret == AVERROR_EOF) {
// On flush+EOF, push a NULL frame so the video path can drain.
1468                     if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1469                         do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1473             if (ost->finished) {
1474                 av_frame_unref(filtered_frame);
1477             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1478                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1479                 AVRational filter_tb = av_buffersink_get_time_base(filter);
1480                 AVRational tb = enc->time_base;
// Widen the time base so float_pts keeps extra fractional precision.
1481                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1483                 tb.den <<= extra_bits;
1485                     av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1486                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1487                 float_pts /= 1 << extra_bits;
1488                 // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1489                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1491                 filtered_frame->pts =
1492                     av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1493                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1495             //if (ost->source_index >= 0)
1496             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1498             switch (av_buffersink_get_type(filter)) {
1499             case AVMEDIA_TYPE_VIDEO:
1500                 if (!ost->frame_aspect_ratio.num)
1501                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1504                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1505                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1507                            enc->time_base.num, enc->time_base.den);
1510                 do_video_out(of, ost, filtered_frame, float_pts);
1512             case AVMEDIA_TYPE_AUDIO:
// Channel-count changes are fatal unless the encoder supports them.
1513                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1514                     enc->channels != filtered_frame->channels) {
1515                     av_log(NULL, AV_LOG_ERROR,
1516                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1519                 do_audio_out(of, ost, filtered_frame);
1522                 // TODO support subtitle filters
1526             av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-type output sizes, muxing overhead,
 * and (at verbose level) per-stream demux/decode/encode/mux statistics.
 * NOTE(review): non-contiguous listing; some braces/conditions omitted.
 */
1533 static void print_final_stats(int64_t total_size)
1535     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1536     uint64_t subtitle_size = 0;
1537     uint64_t data_size = 0;
1538     float percent = -1.0;
// Accumulate written bytes per media type across all output streams.
1542     for (i = 0; i < nb_output_streams; i++) {
1543         OutputStream *ost = output_streams[i];
1544         switch (ost->enc_ctx->codec_type) {
1545             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1546             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1547             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1548             default:                 other_size += ost->data_size; break;
1550         extra_size += ost->enc_ctx->extradata_size;
1551         data_size  += ost->data_size;
1552         if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1553             != AV_CODEC_FLAG_PASS1)
// Muxing overhead as a percentage of the payload bytes.
1557     if (data_size && total_size>0 && total_size >= data_size)
1558         percent = 100.0 * (total_size - data_size) / data_size;
1560     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1561            video_size / 1024.0,
1562            audio_size / 1024.0,
1563            subtitle_size / 1024.0,
1564            other_size / 1024.0,
1565            extra_size / 1024.0);
1567         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1569         av_log(NULL, AV_LOG_INFO, "unknown");
1570     av_log(NULL, AV_LOG_INFO, "\n");
1572     /* print verbose per-stream stats */
1573     for (i = 0; i < nb_input_files; i++) {
1574         InputFile *f = input_files[i];
1575         uint64_t total_packets = 0, total_size = 0;
1577         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1580         for (j = 0; j < f->nb_streams; j++) {
1581             InputStream *ist = input_streams[f->ist_index + j];
1582             enum AVMediaType type = ist->dec_ctx->codec_type;
1584             total_size    += ist->data_size;
1585             total_packets += ist->nb_packets;
1587             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1588                    i, j, media_type_string(type));
1589             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1590                    ist->nb_packets, ist->data_size);
1592             if (ist->decoding_needed) {
1593                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1594                        ist->frames_decoded);
1595                 if (type == AVMEDIA_TYPE_AUDIO)
1596                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1597                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1600             av_log(NULL, AV_LOG_VERBOSE, "\n");
1603         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1604                total_packets, total_size);
// Same summary, mirrored for the output side.
1607     for (i = 0; i < nb_output_files; i++) {
1608         OutputFile *of = output_files[i];
1609         uint64_t total_packets = 0, total_size = 0;
1611         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1614         for (j = 0; j < of->ctx->nb_streams; j++) {
1615             OutputStream *ost = output_streams[of->ost_index + j];
1616             enum AVMediaType type = ost->enc_ctx->codec_type;
1618             total_size    += ost->data_size;
1619             total_packets += ost->packets_written;
1621             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1622                    i, j, media_type_string(type));
1623             if (ost->encoding_needed) {
1624                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1625                        ost->frames_encoded);
1626                 if (type == AVMEDIA_TYPE_AUDIO)
1627                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1628                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1631             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1632                    ost->packets_written, ost->data_size);
1634             av_log(NULL, AV_LOG_VERBOSE, "\n");
1637         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1638                total_packets, total_size);
1640     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1641         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1643             av_log(NULL, AV_LOG_WARNING, "\n");
1645             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Periodically (throttled to every 500 ms) or at end of run, build and
 * emit the one-line progress report and, if enabled, the machine-readable
 * key=value progress stream (-progress).
 * NOTE(review): non-contiguous listing; several declarations, early
 * returns and closing braces are not visible here.
 */
1650 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1652     AVBPrint buf, buf_script;
1654     AVFormatContext *oc;
1656     AVCodecContext *enc;
1657     int frame_number, vid, i;
1660     int64_t pts = INT64_MIN + 1;
// Static state: throttling timestamp and QP histogram persist across calls.
1661     static int64_t last_time = -1;
1662     static int qp_histogram[52];
1663     int hours, mins, secs, us;
1664     const char *hours_sign;
1668     if (!print_stats && !is_last_report && !progress_avio)
// Rate-limit intermediate reports to one per 500 ms of wall clock.
1671     if (!is_last_report) {
1672         if (last_time == -1) {
1673             last_time = cur_time;
1676         if ((cur_time - last_time) < 500000)
1678         last_time = cur_time;
1681     t = (cur_time-timer_start) / 1000000.0;
1684     oc = output_files[0]->ctx;
1686     total_size = avio_size(oc->pb);
1687     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1688         total_size = avio_tell(oc->pb);
1691     av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1692     av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1693     for (i = 0; i < nb_output_streams; i++) {
1695         ost = output_streams[i];
1697         if (!ost->stream_copy)
1698             q = ost->quality / (float) FF_QP2LAMBDA;
1700         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701             av_bprintf(&buf, "q=%2.1f ", q);
1702             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703                        ost->file_index, ost->index, q);
// Only the first video stream contributes frame/fps/q to the report.
1705         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1708             frame_number = ost->frame_number;
1709             fps = t > 1 ? frame_number / t : 0;
1710             av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1711                      frame_number, fps < 9.95, fps, q);
1712             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1713             av_bprintf(&buf_script, "fps=%.2f\n", fps);
1714             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1715                        ost->file_index, ost->index, q);
1717                 av_bprintf(&buf, "L");
1721                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1723                 for (j = 0; j < 32; j++)
1724                     av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1727             if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1729                 double error, error_sum = 0;
1730                 double scale, scale_sum = 0;
1732                 char type[3] = { 'Y','U','V' };
1733                 av_bprintf(&buf, "PSNR=");
1734                 for (j = 0; j < 3; j++) {
// Last report uses cumulative encoder error; otherwise per-frame error.
1735                     if (is_last_report) {
1736                         error = enc->error[j];
1737                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1739                         error = ost->error[j];
1740                         scale = enc->width * enc->height * 255.0 * 255.0;
1746                     p = psnr(error / scale);
1747                     av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1748                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1749                                ost->file_index, ost->index, type[j] | 32, p);
1751                 p = psnr(error_sum / scale_sum);
1752                 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1753                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1754                            ost->file_index, ost->index, p);
1758         /* compute min output value */
1759         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1760             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1761                                           ost->st->time_base, AV_TIME_BASE_Q));
1763             nb_frames_drop += ost->last_dropped;
1766     secs = FFABS(pts) / AV_TIME_BASE;
1767     us = FFABS(pts) % AV_TIME_BASE;
1772     hours_sign = (pts < 0) ? "-" : "";
1774     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1775     speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1777     if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1778     else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1779     if (pts == AV_NOPTS_VALUE) {
1780         av_bprintf(&buf, "N/A ");
1782         av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1783                    hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1787         av_bprintf(&buf, "bitrate=N/A");
1788         av_bprintf(&buf_script, "bitrate=N/A\n");
1790         av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1791         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1794     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1795     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1796     if (pts == AV_NOPTS_VALUE) {
1797         av_bprintf(&buf_script, "out_time_ms=N/A\n");
1798         av_bprintf(&buf_script, "out_time=N/A\n");
1800         av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1801         av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1802                    hours_sign, hours, mins, secs, us);
1805     if (nb_frames_dup || nb_frames_drop)
1806         av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1807     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1808     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1811         av_bprintf(&buf, " speed=N/A");
1812         av_bprintf(&buf_script, "speed=N/A\n");
1814         av_bprintf(&buf, " speed=%4.3gx", speed);
1815         av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
// Human-readable line goes to stderr (or the log); '\r' overwrites in place.
1818     if (print_stats || is_last_report) {
1819         const char end = is_last_report ? '\n' : '\r';
1820         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1821             fprintf(stderr, "%s    %c", buf.str, end);
1823             av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);
1827     av_bprint_finalize(&buf, NULL);
1829     if (progress_avio) {
1830         av_bprintf(&buf_script, "progress=%s\n",
1831                    is_last_report ? "end" : "continue");
1832         avio_write(progress_avio, buf_script.str,
1833                    FFMIN(buf_script.len, buf_script.size - 1));
1834         avio_flush(progress_avio);
1835         av_bprint_finalize(&buf_script, NULL);
1836         if (is_last_report) {
1837             if ((ret = avio_closep(&progress_avio)) < 0)
1838                 av_log(NULL, AV_LOG_ERROR,
1839                        "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1844         print_final_stats(total_size);
1847 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1849 // We never got any input. Set a fake format, which will
1850 // come from libavformat.
1851 ifilter->format = par->format;
1852 ifilter->sample_rate = par->sample_rate;
1853 ifilter->channels = par->channels;
1854 ifilter->channel_layout = par->channel_layout;
1855 ifilter->width = par->width;
1856 ifilter->height = par->height;
1857 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
/*
 * Drain every encoder at end of input: initialize never-started streams,
 * send a NULL frame to each encoder and write out the remaining packets.
 * NOTE(review): non-contiguous listing; error-exit paths and several
 * braces are not visible here.
 */
1860 static void flush_encoders(void)
1864     for (i = 0; i < nb_output_streams; i++) {
1865         OutputStream   *ost = output_streams[i];
1866         AVCodecContext *enc = ost->enc_ctx;
1867         OutputFile      *of = output_files[ost->file_index];
1869         if (!ost->encoding_needed)
1872         // Try to enable encoding with no input frames.
1873         // Maybe we should just let encoding fail instead.
1874         if (!ost->initialized) {
1875             FilterGraph *fg = ost->filter->graph;
1876             char error[1024] = "";
1878             av_log(NULL, AV_LOG_WARNING,
1879                    "Finishing stream %d:%d without any data written to it.\n",
1880                    ost->file_index, ost->st->index);
// Configure the filtergraph late, filling unknown input formats from codecpar.
1882             if (ost->filter && !fg->graph) {
1884                 for (x = 0; x < fg->nb_inputs; x++) {
1885                     InputFilter *ifilter = fg->inputs[x];
1886                     if (ifilter->format < 0)
1887                         ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1890                 if (!ifilter_has_all_input_formats(fg))
1893                 ret = configure_filtergraph(fg);
1895                     av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1899                 finish_output_stream(ost);
1902             ret = init_output_stream(ost, error, sizeof(error));
1904                 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1905                        ost->file_index, ost->index, error);
// PCM-style audio (frame_size <= 1) and non-A/V streams need no draining.
1910         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1913         if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1917             const char *desc = NULL;
1921             switch (enc->codec_type) {
1922             case AVMEDIA_TYPE_AUDIO:
1925             case AVMEDIA_TYPE_VIDEO:
1932             av_init_packet(&pkt);
1936             update_benchmark(NULL);
// Send the flush (NULL) frame whenever the encoder asks for more input.
1938             while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1939                 ret = avcodec_send_frame(enc, NULL);
1941                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1948             update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1949             if (ret < 0 && ret != AVERROR_EOF) {
1950                 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1955             if (ost->logfile && enc->stats_out) {
1956                 fprintf(ost->logfile, "%s", enc->stats_out);
// EOF from the encoder: flush the muxer side with an EOF packet.
1958             if (ret == AVERROR_EOF) {
1959                 output_packet(of, &pkt, ost, 1);
1962             if (ost->finished & MUXER_FINISHED) {
1963                 av_packet_unref(&pkt);
1966             av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1967             pkt_size = pkt.size;
1968             output_packet(of, &pkt, ost, 0);
1969             if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1970                 do_video_stats(ost, pkt_size);
1977  * Check whether a packet from ist should be written into ost at this time
/*
 * NOTE(review): the return statements for both conditions below are
 * missing from this listing — confirm the exact return values against
 * the full source.
 */
1979 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1981     OutputFile *of = output_files[ost->file_index];
1982     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
// The packet must come from the stream this output is mapped to.
1984     if (ost->source_index != ist_index)
// Packets before the requested output start time are not written.
1990     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy a packet from an input stream to an output stream without
 * re-encoding: rescale timestamps into the muxer time base, honor
 * start/recording-time limits, and forward the data.
 * NOTE(review): non-contiguous listing; several early returns and the
 * EOF condition around line 2006 are not fully visible.
 */
1996 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1998     OutputFile *of = output_files[ost->file_index];
1999     InputFile   *f = input_files [ist->file_index];
2000     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2001     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2002     AVPacket opkt = { 0 };
2004     av_init_packet(&opkt);
2006     // EOF: flush output bitstream filters.
2008         output_packet(of, &opkt, ost, 1);
// Drop leading non-keyframes unless explicitly requested otherwise.
2012     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2013         !ost->copy_initial_nonkeyframes)
2016     if (!ost->frame_number && !ost->copy_prior_start) {
2017         int64_t comp_start = start_time;
2018         if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2019             comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2020         if (pkt->pts == AV_NOPTS_VALUE ?
2021             ist->pts < comp_start :
2022             pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
// Stop copying once the output recording time is reached.
2026     if (of->recording_time != INT64_MAX &&
2027         ist->pts >= of->recording_time + start_time) {
2028         close_output_stream(ost);
2032     if (f->recording_time != INT64_MAX) {
2033         start_time = f->ctx->start_time;
2034         if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2035             start_time += f->start_time;
2036         if (ist->pts >= f->recording_time + start_time) {
2037             close_output_stream(ost);
2042     /* force the input stream PTS */
2043     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2046     if (pkt->pts != AV_NOPTS_VALUE)
2047         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2049         opkt.pts = AV_NOPTS_VALUE;
// Missing DTS: fall back to the input stream's tracked DTS (AV_TIME_BASE).
2051     if (pkt->dts == AV_NOPTS_VALUE)
2052         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2054         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2055     opkt.dts -= ost_tb_start_time;
2057     if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2058         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2060             duration = ist->dec_ctx->frame_size;
// av_rescale_delta keeps sample-accurate timestamps across rounding.
2061         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2062                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2063                                                ost->mux_timebase) - ost_tb_start_time;
2066     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2068     opkt.flags    = pkt->flags;
2071         opkt.buf = av_buffer_ref(pkt->buf);
2075     opkt.data = pkt->data;
2076     opkt.size = pkt->size;
2078     av_copy_packet_side_data(&opkt, pkt);
2080     output_packet(of, &opkt, ost, 0);
/*
 * If the decoder reports no channel layout, pick the default layout for
 * its channel count and log the guess.
 * NOTE(review): the return statements (including the guess_layout_max
 * bail-out) are missing from this listing — confirm against the full
 * source.
 */
2083 int guess_input_channel_layout(InputStream *ist)
2085     AVCodecContext *dec = ist->dec_ctx;
2087     if (!dec->channel_layout) {
2088         char layout_name[256];
// Respect the user-set cap on how many channels we are willing to guess for.
2090         if (dec->channels > ist->guess_layout_max)
2092         dec->channel_layout = av_get_default_channel_layout(dec->channels);
2093         if (!dec->channel_layout)
2095         av_get_channel_layout_string(layout_name, sizeof(layout_name),
2096                                      dec->channels, dec->channel_layout);
2097         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2098                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Record decode success/failure statistics and, with -xerror, abort on
 * decode errors or corrupt decoded frames.
 */
2103 static void check_decode_result(InputStream *ist, int *got_output, int ret)
// decode_error_stat[0] counts successes, [1] counts failures.
2105     if (*got_output || ret<0)
2106         decode_error_stat[ret<0] ++;
2108     if (ret < 0 && exit_on_error)
2111     if (exit_on_error && *got_output && ist) {
2112         if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2113             av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2119 // Filters can be configured only if the formats of all inputs are known.
/*
 * NOTE(review): the return statements are missing from this listing;
 * the visible condition identifies an audio/video input whose format is
 * still unknown (format < 0).
 */
2120 static int ifilter_has_all_input_formats(FilterGraph *fg)
2123     for (i = 0; i < fg->nb_inputs; i++) {
2124         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125                                           fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
/*
 * Feed one decoded frame into a filtergraph input. If the frame's
 * parameters differ from what the graph was configured with (or the
 * graph is not configured yet), buffer the frame and (re)configure.
 * NOTE(review): non-contiguous listing; several braces and the final
 * return are not visible.
 */
2131 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2133     FilterGraph *fg = ifilter->graph;
2134     int need_reinit, ret, i;
2136     /* determine if the parameters for this input changed */
2137     need_reinit = ifilter->format != frame->format;
// A change in hw-frames context also forces reconfiguration.
2138     if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2139         (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2142     switch (ifilter->ist->st->codecpar->codec_type) {
2143     case AVMEDIA_TYPE_AUDIO:
2144         need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
2145                        ifilter->channels       != frame->channels ||
2146                        ifilter->channel_layout != frame->channel_layout;
2148     case AVMEDIA_TYPE_VIDEO:
2149         need_reinit |= ifilter->width  != frame->width ||
2150                        ifilter->height != frame->height;
2155         ret = ifilter_parameters_from_frame(ifilter, frame);
2160     /* (re)init the graph if possible, otherwise buffer the frame and return */
2161     if (need_reinit || !fg->graph) {
2162         for (i = 0; i < fg->nb_inputs; i++) {
// Not all inputs known yet: clone the frame onto this input's FIFO.
2163             if (!ifilter_has_all_input_formats(fg)) {
2164                 AVFrame *tmp = av_frame_clone(frame);
2166                     return AVERROR(ENOMEM);
2167                 av_frame_unref(frame);
2169                 if (!av_fifo_space(ifilter->frame_queue)) {
2170                     ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2172                         av_frame_free(&tmp);
2176                 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
// Drain the old graph before reconfiguring, so no frames are lost.
2181         ret = reap_filters(1);
2182         if (ret < 0 && ret != AVERROR_EOF) {
2183             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2187         ret = configure_filtergraph(fg);
2189             av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2194     ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2196         if (ret != AVERROR_EOF)
2197             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
/*
 * Signal EOF on a filtergraph input at the given pts. If the graph was
 * never configured, fall back to codec parameters so it can still be
 * set up; fail if the format remains unknown for an A/V input.
 */
2204 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2210     if (ifilter->filter) {
2211         ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2215         // the filtergraph was never configured
2216         if (ifilter->format < 0)
2217             ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2218         if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2219             av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2220             return AVERROR_INVALIDDATA;
2227 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2228 // There is the following difference: if you got a frame, you must call
2229 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2230 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
/*
 * Thin wrapper mapping the send/receive decode API onto the old
 * got_frame-style interface.
 * NOTE(review): braces, *got_frame handling and return statements are
 * missing from this listing.
 */
2231 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2238     ret = avcodec_send_packet(avctx, pkt);
2239     // In particular, we don't expect AVERROR(EAGAIN), because we read all
2240     // decoded frames with avcodec_receive_frame() until done.
2241     if (ret < 0 && ret != AVERROR_EOF)
2245     ret = avcodec_receive_frame(avctx, frame);
2246     if (ret < 0 && ret != AVERROR(EAGAIN))
/*
 * Distribute one decoded frame to every filtergraph input fed by this
 * input stream; all but the last consumer get a reference-counted copy.
 */
2254 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2259     av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2260     for (i = 0; i < ist->nb_filters; i++) {
// For every filter but the last, reference the frame instead of moving it.
2261         if (i < ist->nb_filters - 1) {
2262             f = ist->filter_frame;
2263             ret = av_frame_ref(f, decoded_frame);
2268         ret = ifilter_send_frame(ist->filters[i], f);
// EOF from a single filter input is not an error for the caller.
2269         if (ret == AVERROR_EOF)
2270             ret = 0; /* ignore */
2272             av_log(NULL, AV_LOG_ERROR,
2273                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
/*
 * Decode one audio packet, fix up the frame's pts/time base, advance the
 * stream's predicted timestamps and hand the frame to the filters.
 * NOTE(review): non-contiguous listing; goto/early-exit lines are not
 * all visible.
 */
2280 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2283     AVFrame *decoded_frame;
2284     AVCodecContext *avctx = ist->dec_ctx;
2286     AVRational decoded_frame_tb;
// Lazily allocate the reusable decode/filter frames.
2288     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2289         return AVERROR(ENOMEM);
2290     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2291         return AVERROR(ENOMEM);
2292     decoded_frame = ist->decoded_frame;
2294     update_benchmark(NULL);
2295     ret = decode(avctx, decoded_frame, got_output, pkt);
2296     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2300     if (ret >= 0 && avctx->sample_rate <= 0) {
2301         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2302         ret = AVERROR_INVALIDDATA;
2305     if (ret != AVERROR_EOF)
2306         check_decode_result(ist, got_output, ret);
2308     if (!*got_output || ret < 0)
2311     ist->samples_decoded += decoded_frame->nb_samples;
2312     ist->frames_decoded++;
2315     /* increment next_dts to use for the case where the input stream does not
2316        have timestamps or there are multiple frames in the packet */
2317     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2319     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
// Prefer the frame's own pts; fall back to packet pts, then stream dts.
2323     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2324         decoded_frame_tb   = ist->st->time_base;
2325     } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2326         decoded_frame->pts = pkt->pts;
2327         decoded_frame_tb   = ist->st->time_base;
2329         decoded_frame->pts = ist->dts;
2330         decoded_frame_tb   = AV_TIME_BASE_Q;
2332     if (decoded_frame->pts != AV_NOPTS_VALUE)
2333         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2334                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2335                                               (AVRational){1, avctx->sample_rate});
2336     ist->nb_samples = decoded_frame->nb_samples;
2337     err = send_frame_to_filters(ist, decoded_frame);
2339     av_frame_unref(ist->filter_frame);
2340     av_frame_unref(decoded_frame);
2341     return err < 0 ? err : ret;
/*
 * Decode one video packet, reconstruct a usable timestamp (best-effort
 * ts, buffered dts at EOF, or cfr counter), retrieve hwaccel data if
 * needed, and forward the frame to the filters.
 * NOTE(review): non-contiguous listing; the avpkt declaration and some
 * braces/early exits are not visible.
 */
2344 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2347     AVFrame *decoded_frame;
2348     int i, ret = 0, err = 0;
2349     int64_t best_effort_timestamp;
2350     int64_t dts = AV_NOPTS_VALUE;
2353     // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2354     // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2356     if (!eof && pkt && pkt->size == 0)
2359     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2360         return AVERROR(ENOMEM);
2361     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2362         return AVERROR(ENOMEM);
2363     decoded_frame = ist->decoded_frame;
2364     if (ist->dts != AV_NOPTS_VALUE)
2365         dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2368         avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2371     // The old code used to set dts on the drain packet, which does not work
2372     // with the new API anymore.
// Remember dts values so they can stand in for missing timestamps at EOF.
2374         void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2376             return AVERROR(ENOMEM);
2377         ist->dts_buffer = new;
2378         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2381     update_benchmark(NULL);
2382     ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2383     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2387     // The following line may be required in some cases where there is no parser
2388     // or the parser does not set has_b_frames correctly
2389     if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2390         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2391             ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2393             av_log(ist->dec_ctx, AV_LOG_WARNING,
2394                    "video_delay is larger in decoder than demuxer %d > %d.\n"
2395                    "If you want to help, upload a sample "
2396                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2397                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2398                    ist->dec_ctx->has_b_frames,
2399                    ist->st->codecpar->video_delay);
2402     if (ret != AVERROR_EOF)
2403         check_decode_result(ist, got_output, ret);
// Sanity check: decoded frame geometry must match the decoder context.
2405     if (*got_output && ret >= 0) {
2406         if (ist->dec_ctx->width  != decoded_frame->width ||
2407             ist->dec_ctx->height != decoded_frame->height ||
2408             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2409             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2410                    decoded_frame->width,
2411                    decoded_frame->height,
2412                    decoded_frame->format,
2413                    ist->dec_ctx->width,
2414                    ist->dec_ctx->height,
2415                    ist->dec_ctx->pix_fmt);
2419     if (!*got_output || ret < 0)
2422     if(ist->top_field_first>=0)
2423         decoded_frame->top_field_first = ist->top_field_first;
2425     ist->frames_decoded++;
// Download the frame from the hwaccel surface when required.
2427     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2428         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2432     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2434     best_effort_timestamp= decoded_frame->best_effort_timestamp;
2435     *duration_pts = decoded_frame->pkt_duration;
// Constant frame rate forced: synthesize timestamps from a counter.
2437     if (ist->framerate.num)
2438         best_effort_timestamp = ist->cfr_next_pts++;
2440     if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2441         best_effort_timestamp = ist->dts_buffer[0];
2443         for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2444             ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2445         ist->nb_dts_buffer--;
2448     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2449         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2451         if (ts != AV_NOPTS_VALUE)
2452             ist->next_pts = ist->pts = ts;
2456         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2457                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2458                ist->st->index, av_ts2str(decoded_frame->pts),
2459                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2460                best_effort_timestamp,
2461                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2462                decoded_frame->key_frame, decoded_frame->pict_type,
2463                ist->st->time_base.num, ist->st->time_base.den);
2466     if (ist->st->sample_aspect_ratio.num)
2467         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2469     err = send_frame_to_filters(ist, decoded_frame);
2472     av_frame_unref(ist->filter_frame);
2473     av_frame_unref(decoded_frame);
2474     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet and distribute the result to sub2video and to
 * every subtitle-encoding output stream.
 * NOTE(review): this excerpt is missing several original lines (braces,
 * declarations, error paths); code below is kept byte-identical.
 */
2477 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2480 AVSubtitle subtitle;
2482 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2483 &subtitle, got_output, pkt);
2485 check_decode_result(NULL, got_output, ret);
// On error or no output: flush sub2video state (EOF path — presumably only
// when pkt is NULL; the guarding condition is elided here — TODO confirm).
2487 if (ret < 0 || !*got_output) {
2490 sub2video_flush(ist);
// -fix_sub_duration: clamp the previous subtitle's display time so it ends
// when the current one starts (times compared in milliseconds).
2494 if (ist->fix_sub_duration) {
2496 if (ist->prev_sub.got_output) {
2497 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2498 1000, AV_TIME_BASE);
2499 if (end < ist->prev_sub.subtitle.end_display_time) {
2500 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2501 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2502 ist->prev_sub.subtitle.end_display_time, end,
2503 end <= 0 ? ", dropping it" : "");
2504 ist->prev_sub.subtitle.end_display_time = end;
// Delay output by one subtitle: emit the previous one, keep the current one
// buffered so its duration can be fixed on the next call.
2507 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2508 FFSWAP(int, ret, ist->prev_sub.ret);
2509 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
// sub2video: render directly if the frame exists, otherwise queue the
// subtitle in a FIFO until the filter graph is configured.
2517 if (ist->sub2video.frame) {
2518 sub2video_update(ist, &subtitle);
2519 } else if (ist->nb_filters) {
2520 if (!ist->sub2video.sub_queue)
2521 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2522 if (!ist->sub2video.sub_queue)
2524 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2525 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2529 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2533 if (!subtitle.num_rects)
2536 ist->frames_decoded++;
// Forward the decoded subtitle to every eligible subtitle encoder.
2538 for (i = 0; i < nb_output_streams; i++) {
2539 OutputStream *ost = output_streams[i];
2541 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2542 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2545 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2550 avsubtitle_free(&subtitle);
2554 static int send_filter_eof(InputStream *ist)
2557 /* TODO keep pts also in stream time base to avoid converting back */
2558 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2559 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2561 for (i = 0; i < ist->nb_filters; i++) {
2562 ret = ifilter_send_eof(ist->filters[i], pts);
2569 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher: feed one demuxed packet (or EOF) to the
 * decoders or the stream-copy path, maintaining the stream's dts/pts
 * bookkeeping in AV_TIME_BASE_Q units.  Returns !eof_reached, i.e. 0 once
 * the stream is fully drained.
 * NOTE(review): many original lines (braces, declarations, break/return
 * statements) are elided in this excerpt; code kept byte-identical.
 */
2570 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2574 int eof_reached = 0;
// First packet: seed ist->dts with -has_b_frames frames' worth of delay so
// downstream timestamps start near zero.
2577 if (!ist->saw_first_ts) {
2578 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2580 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2581 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2582 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2584 ist->saw_first_ts = 1;
2587 if (ist->next_dts == AV_NOPTS_VALUE)
2588 ist->next_dts = ist->dts;
2589 if (ist->next_pts == AV_NOPTS_VALUE)
2590 ist->next_pts = ist->pts;
2594 av_init_packet(&avpkt);
// A concrete dts on the packet overrides our running estimate.
2601 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2602 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2603 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2604 ist->next_pts = ist->pts = ist->dts;
2607 // while we have more to decode or while the decoder did output something on EOF
2608 while (ist->decoding_needed) {
2609 int64_t duration_dts = 0;
2610 int64_t duration_pts = 0;
2612 int decode_failed = 0;
2614 ist->pts = ist->next_pts;
2615 ist->dts = ist->next_dts;
// Dispatch per media type; "repeating" (declared in elided lines) means the
// same packet is being drained for additional frames, so pass NULL.
2617 switch (ist->dec_ctx->codec_type) {
2618 case AVMEDIA_TYPE_AUDIO:
2619 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622 case AVMEDIA_TYPE_VIDEO:
2623 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
// Estimate the frame duration: packet duration first, then codec framerate
// (scaled by repeat_pict from the parser when available).
2625 if (!repeating || !pkt || got_output) {
2626 if (pkt && pkt->duration) {
2627 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2628 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2629 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2630 duration_dts = ((int64_t)AV_TIME_BASE *
2631 ist->dec_ctx->framerate.den * ticks) /
2632 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2635 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2636 ist->next_dts += duration_dts;
2638 ist->next_dts = AV_NOPTS_VALUE;
// pts advances by the decoded frame's own duration when known, else by the
// dts-side estimate.
2642 if (duration_pts > 0) {
2643 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2645 ist->next_pts += duration_dts;
2649 case AVMEDIA_TYPE_SUBTITLE:
2652 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2653 if (!pkt && ret >= 0)
2660 if (ret == AVERROR_EOF) {
2666 if (decode_failed) {
2667 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2668 ist->file_index, ist->st->index, av_err2str(ret));
2670 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2671 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2673 if (!decode_failed || exit_on_error)
2679 ist->got_output = 1;
2684 // During draining, we might get multiple output frames in this loop.
2685 // ffmpeg.c does not drain the filter chain on configuration changes,
2686 // which means if we send multiple frames at once to the filters, and
2687 // one of those frames changes configuration, the buffered frames will
2688 // be lost. This can upset certain FATE tests.
2689 // Decode only 1 frame per call on EOF to appease these FATE tests.
2690 // The ideal solution would be to rewrite decoding to use the new
2691 // decoding API in a better way.
2698 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2699 /* except when looping we need to flush but not to send an EOF */
2700 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2701 int ret = send_filter_eof(ist);
2703 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2708 /* handle stream copy */
// Stream copy: no decoding, so synthesize next_dts from frame_size/sample
// rate (audio) or framerate/packet duration (video).
2709 if (!ist->decoding_needed && pkt) {
2710 ist->dts = ist->next_dts;
2711 switch (ist->dec_ctx->codec_type) {
2712 case AVMEDIA_TYPE_AUDIO:
2713 av_assert1(pkt->duration >= 0);
2714 if (ist->dec_ctx->sample_rate) {
2715 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2716 ist->dec_ctx->sample_rate;
2718 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721 case AVMEDIA_TYPE_VIDEO:
2722 if (ist->framerate.num) {
2723 // TODO: Remove work-around for c99-to-c89 issue 7
2724 AVRational time_base_q = AV_TIME_BASE_Q;
2725 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2726 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2727 } else if (pkt->duration) {
2728 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2729 } else if(ist->dec_ctx->framerate.num != 0) {
2730 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2731 ist->next_dts += ((int64_t)AV_TIME_BASE *
2732 ist->dec_ctx->framerate.den * ticks) /
2733 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2737 ist->pts = ist->dts;
2738 ist->next_pts = ist->next_dts;
// Feed the unmodified packet to every stream-copy output it maps to.
2740 for (i = 0; i < nb_output_streams; i++) {
2741 OutputStream *ost = output_streams[i];
2743 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746 do_streamcopy(ist, ost, pkt);
2749 return !eof_reached;
/*
 * Build an SDP description covering all RTP output files and either print it
 * to stdout or write it to -sdp_file.  Waits until every output file has
 * written its header.
 * NOTE(review): allocation-failure and cleanup lines are elided in this
 * excerpt; code kept byte-identical.
 */
2752 static void print_sdp(void)
2757 AVIOContext *sdp_pb;
2758 AVFormatContext **avc;
// Defer until all headers are written, so stream parameters are final.
2760 for (i = 0; i < nb_output_files; i++) {
2761 if (!output_files[i]->header_written)
2765 avc = av_malloc_array(nb_output_files, sizeof(*avc));
// Collect only the RTP muxer contexts; j counts how many were found.
2768 for (i = 0, j = 0; i < nb_output_files; i++) {
2769 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2770 avc[j] = output_files[i]->ctx;
2778 av_sdp_create(avc, j, sdp, sizeof(sdp));
2780 if (!sdp_filename) {
2781 printf("SDP:\n%s\n", sdp);
2784 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2785 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2787 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2788 avio_closep(&sdp_pb);
2789 av_freep(&sdp_filename);
/*
 * AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first hwaccel format matching the user's requested
 * hwaccel, initializing it; software formats fall through.
 * NOTE(review): loop braces, break/continue statements and the final
 * software-format return are elided in this excerpt; code byte-identical.
 */
2797 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2799 InputStream *ist = s->opaque;
2800 const enum AVPixelFormat *p;
2803 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2804 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2805 const AVCodecHWConfig *config = NULL;
// Software formats are acceptable as-is; only hwaccel formats need setup.
2808 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
// Generic/auto hwaccel: search the codec's hw configs for one that offers
// this pixel format via a hw device context.
2811 if (ist->hwaccel_id == HWACCEL_GENERIC ||
2812 ist->hwaccel_id == HWACCEL_AUTO) {
2814 config = avcodec_get_hw_config(s->codec, i);
2817 if (!(config->methods &
2818 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2820 if (config->pix_fmt == *p)
2825 if (config->device_type != ist->hwaccel_device_type) {
2826 // Different hwaccel offered, ignore.
2830 ret = hwaccel_decode_init(s);
// Initialization failure is fatal only when the user explicitly requested
// this hwaccel; with -hwaccel auto we silently fall back.
2832 if (ist->hwaccel_id == HWACCEL_GENERIC) {
2833 av_log(NULL, AV_LOG_FATAL,
2834 "%s hwaccel requested for input stream #%d:%d, "
2835 "but cannot be initialized.\n",
2836 av_hwdevice_get_type_name(config->device_type),
2837 ist->file_index, ist->st->index);
2838 return AV_PIX_FMT_NONE;
// Legacy path: look the format up in the static hwaccels[] table.
2843 const HWAccel *hwaccel = NULL;
2845 for (i = 0; hwaccels[i].name; i++) {
2846 if (hwaccels[i].pix_fmt == *p) {
2847 hwaccel = &hwaccels[i];
2852 // No hwaccel supporting this pixfmt.
2855 if (hwaccel->id != ist->hwaccel_id) {
2856 // Does not match requested hwaccel.
2860 ret = hwaccel->init(s);
2862 av_log(NULL, AV_LOG_FATAL,
2863 "%s hwaccel requested for input stream #%d:%d, "
2864 "but cannot be initialized.\n", hwaccel->name,
2865 ist->file_index, ist->st->index);
2866 return AV_PIX_FMT_NONE;
2870 if (ist->hw_frames_ctx) {
2871 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2872 if (!s->hw_frames_ctx)
2873 return AV_PIX_FMT_NONE;
// Remember the chosen hwaccel format so get_buffer/retrieve_data recognize it.
2876 ist->hwaccel_pix_fmt = *p;
2883 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2885 InputStream *ist = s->opaque;
2887 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2888 return ist->hwaccel_get_buffer(s, frame, flags);
2890 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * Open the decoder for one input stream (when decoding is needed) and seed
 * its callbacks, options and timestamp state.  On failure writes a message
 * into 'error' and returns a negative AVERROR code.
 * NOTE(review): braces, some error returns and the decoder-NULL check body
 * are elided in this excerpt; code kept byte-identical.
 */
2893 static int init_input_stream(int ist_index, char *error, int error_len)
2896 InputStream *ist = input_streams[ist_index];
2898 if (ist->decoding_needed) {
2899 AVCodec *codec = ist->dec;
2901 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2902 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2903 return AVERROR(EINVAL);
// Install ffmpeg.c's hwaccel-aware callbacks; opaque carries the ist.
2906 ist->dec_ctx->opaque = ist;
2907 ist->dec_ctx->get_format = get_format;
2908 ist->dec_ctx->get_buffer2 = get_buffer;
2909 ist->dec_ctx->thread_safe_callbacks = 1;
2911 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2912 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2913 (ist->decoding_needed & DECODING_FOR_OST)) {
2914 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2915 if (ist->decoding_needed & DECODING_FOR_FILTER)
2916 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2919 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2921 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2922 * audio, and video decoders such as cuvid or mediacodec */
2923 ist->dec_ctx->pkt_timebase = ist->st->time_base;
2925 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2926 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2927 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2928 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2929 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2931 ret = hw_device_setup_for_decode(ist);
2933 snprintf(error, error_len, "Device setup failed for "
2934 "decoder on input stream #%d:%d : %s",
2935 ist->file_index, ist->st->index, av_err2str(ret));
2939 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2940 if (ret == AVERROR_EXPERIMENTAL)
2941 abort_codec_experimental(codec, 0);
2943 snprintf(error, error_len,
2944 "Error while opening decoder for input stream "
2946 ist->file_index, ist->st->index, av_err2str(ret));
// Any decoder options left unconsumed by avcodec_open2() are reported here.
2949 assert_avoptions(ist->decoder_opts);
2952 ist->next_pts = AV_NOPTS_VALUE;
2953 ist->next_dts = AV_NOPTS_VALUE;
2958 static InputStream *get_input_stream(OutputStream *ost)
2960 if (ost->source_index >= 0)
2961 return input_streams[ost->source_index];
/**
 * qsort() comparator for int64_t values, ascending.
 *
 * Returns -1/0/+1 via the branch-free sign trick instead of subtraction,
 * which would overflow for widely separated values.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2970 /* open the muxer when all the streams are initialized */
/*
 * Once every stream of an output file is initialized, write the container
 * header, optionally emit the SDP, and flush packets buffered in the
 * per-stream muxing queues.
 * NOTE(review): early returns and braces are elided in this excerpt.
 */
2971 static int check_init_output_file(OutputFile *of, int file_index)
// Bail out (in elided code) while any stream is still uninitialized.
2975 for (i = 0; i < of->ctx->nb_streams; i++) {
2976 OutputStream *ost = output_streams[of->ost_index + i];
2977 if (!ost->initialized)
2981 of->ctx->interrupt_callback = int_cb;
2983 ret = avformat_write_header(of->ctx, &of->opts);
2985 av_log(NULL, AV_LOG_ERROR,
2986 "Could not write header for output file #%d "
2987 "(incorrect codec parameters ?): %s\n",
2988 file_index, av_err2str(ret));
2991 //assert_avoptions(of->opts);
2992 of->header_written = 1;
2994 av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2996 if (sdp_filename || want_sdp)
2999 /* flush the muxing queues */
3000 for (i = 0; i < of->ctx->nb_streams; i++) {
3001 OutputStream *ost = output_streams[of->ost_index + i];
3003 /* try to improve muxing time_base (only possible if nothing has been written yet) */
3004 if (!av_fifo_size(ost->muxing_queue))
3005 ost->mux_timebase = ost->st->time_base;
// Drain packets queued before the header could be written.
3007 while (av_fifo_size(ost->muxing_queue)) {
3009 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3010 write_packet(of, &pkt, ost, 1);
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * each filter's input parameters/timebase come from the previous filter (or
 * the stream itself for the first one); the stream's codecpar and time base
 * are updated from the last filter's output.
 * NOTE(review): error-return lines after each call are elided in this excerpt.
 */
3017 static int init_output_bsfs(OutputStream *ost)
3022 if (!ost->nb_bitstream_filters)
3025 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3026 ctx = ost->bsf_ctx[i];
3028 ret = avcodec_parameters_copy(ctx->par_in,
3029 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3033 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3035 ret = av_bsf_init(ctx);
3037 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3038 ost->bsf_ctx[i]->filter->name);
// Propagate the final filter's output parameters back onto the stream.
3043 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3044 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3048 ost->st->time_base = ctx->time_base_out;
/*
 * Initialize an output stream in stream-copy (-c copy) mode: copy codec
 * parameters, codec tag, timing, disposition and side data from the source
 * input stream, with per-codec-type fixups.
 * NOTE(review): braces, error returns and some declarations are elided in
 * this excerpt; code kept byte-identical.
 */
3053 static int init_output_stream_streamcopy(OutputStream *ost)
3055 OutputFile *of = output_files[ost->file_index];
3056 InputStream *ist = get_input_stream(ost);
3057 AVCodecParameters *par_dst = ost->st->codecpar;
3058 AVCodecParameters *par_src = ost->ref_par;
3061 uint32_t codec_tag = par_dst->codec_tag;
3063 av_assert0(ist && !ost->filter);
3065 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3067 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3069 av_log(NULL, AV_LOG_FATAL,
3070 "Error setting up codec context options.\n");
3073 avcodec_parameters_from_context(par_src, ost->enc_ctx);
// Keep the source codec tag only when the output container maps it to the
// same codec id, or has no tag of its own for this codec.
3076 unsigned int codec_tag_tmp;
3077 if (!of->ctx->oformat->codec_tag ||
3078 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3079 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3080 codec_tag = par_src->codec_tag;
3083 ret = avcodec_parameters_copy(par_dst, par_src);
3087 par_dst->codec_tag = codec_tag;
3089 if (!ost->frame_rate.num)
3090 ost->frame_rate = ist->framerate;
3091 ost->st->avg_frame_rate = ost->frame_rate;
3093 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3097 // copy timebase while removing common factors
3098 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3099 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3101 // copy estimated duration as a hint to the muxer
3102 if (ost->st->duration <= 0 && ist->st->duration > 0)
3103 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3106 ost->st->disposition = ist->st->disposition;
3108 if (ist->st->nb_side_data) {
3109 for (i = 0; i < ist->st->nb_side_data; i++) {
3110 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3113 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3115 return AVERROR(ENOMEM);
3116 memcpy(dst_data, sd_src->data, sd_src->size);
// -metadata rotate override: publish the forced rotation as a display matrix.
3120 if (ost->rotate_overridden) {
3121 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3122 sizeof(int32_t) * 9);
3124 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3127 switch (par_dst->codec_type) {
3128 case AVMEDIA_TYPE_AUDIO:
3129 if (audio_volume != 256) {
3130 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
// MP3/AC3: clear bogus block_align values some demuxers report.
3133 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3134 par_dst->block_align= 0;
3135 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3136 par_dst->block_align= 0;
3138 case AVMEDIA_TYPE_VIDEO:
3139 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3141 av_mul_q(ost->frame_aspect_ratio,
3142 (AVRational){ par_dst->height, par_dst->width });
3143 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3144 "with stream copy may produce invalid files\n");
3146 else if (ist->st->sample_aspect_ratio.num)
3147 sar = ist->st->sample_aspect_ratio;
3149 sar = par_src->sample_aspect_ratio;
3150 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3151 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3152 ost->st->r_frame_rate = ist->st->r_frame_rate;
3156 ost->mux_timebase = ist->st->time_base;
/*
 * Set the "encoder" metadata tag on the output stream to the libavcodec
 * ident plus encoder name, unless the user already set one or bitexact mode
 * is requested (then only the short "Lavc <name>" form is used).
 * NOTE(review): braces and early returns are elided in this excerpt.
 */
3161 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3163 AVDictionaryEntry *e;
3165 uint8_t *encoder_string;
3166 int encoder_string_len;
3167 int format_flags = 0;
3168 int codec_flags = ost->enc_ctx->flags;
// Respect an explicit user-provided encoder tag.
3170 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
// Evaluate -fflags / -flags option strings so bitexact can be detected even
// before the contexts consume the options.
3173 e = av_dict_get(of->opts, "fflags", NULL, 0);
3175 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3178 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3180 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3182 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3185 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3188 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3189 encoder_string = av_mallocz(encoder_string_len);
3190 if (!encoder_string)
3193 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3194 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3196 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3197 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
// DONT_STRDUP_VAL transfers ownership of encoder_string to the dictionary.
3198 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3199 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * Parse the -force_key_frames argument: a comma-separated list of times
 * and/or the "chapters[+offset]" keyword, producing a sorted array of
 * keyframe pts (in the encoder time base) in ost->forced_kf_pts.
 * NOTE(review): braces and a few statements (comma counting, p advance)
 * are elided in this excerpt; code kept byte-identical.
 */
3202 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3203 AVCodecContext *avctx)
3206 int n = 1, i, size, index = 0;
// First pass (body elided): count commas to size the pts array.
3209 for (p = kf; *p; p++)
3213 pts = av_malloc_array(size, sizeof(*pts));
3215 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3220 for (i = 0; i < n; i++) {
3221 char *next = strchr(p, ',');
// "chapters[+offset]": force a keyframe at every chapter start.
3226 if (!memcmp(p, "chapters", 8)) {
3228 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3231 if (avf->nb_chapters > INT_MAX - size ||
3232 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3234 av_log(NULL, AV_LOG_FATAL,
3235 "Could not allocate forced key frames array.\n");
3238 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3239 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3241 for (j = 0; j < avf->nb_chapters; j++) {
3242 AVChapter *c = avf->chapters[j];
3243 av_assert1(index < size);
3244 pts[index++] = av_rescale_q(c->start, c->time_base,
3245 avctx->time_base) + t;
// Plain timestamp entry.
3250 t = parse_time_or_die("force_key_frames", p, 1);
3251 av_assert1(index < size);
3252 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3259 av_assert0(index == size);
// Encoder-side lookup expects the list sorted ascending.
3260 qsort(pts, size, sizeof(*pts), compare_int64);
3261 ost->forced_kf_count = size;
3262 ost->forced_kf_pts = pts;
/*
 * Choose the encoder time base: an explicit positive -enc_time_base wins;
 * a negative value means "copy from the input stream" (falling back with a
 * warning when no input stream exists); otherwise use the supplied default.
 * NOTE(review): the early returns and the if(ist) guard are elided in this
 * excerpt; code kept byte-identical.
 */
3265 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3267 InputStream *ist = get_input_stream(ost);
3268 AVCodecContext *enc_ctx = ost->enc_ctx;
3269 AVFormatContext *oc;
3271 if (ost->enc_timebase.num > 0) {
3272 enc_ctx->time_base = ost->enc_timebase;
3276 if (ost->enc_timebase.num < 0) {
3278 enc_ctx->time_base = ist->st->time_base;
3282 oc = output_files[ost->file_index]->ctx;
3283 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3286 enc_ctx->time_base = default_time_base;
/*
 * Initialize an output stream that will be encoded: pick frame rate,
 * time base, sample/pixel formats and geometry from the filter graph and
 * the source stream, and set up forced-keyframe handling.
 * NOTE(review): braces, `break`s, some guards and declarations are elided
 * in this excerpt; code kept byte-identical.
 */
3289 static int init_output_stream_encode(OutputStream *ost)
3291 InputStream *ist = get_input_stream(ost);
3292 AVCodecContext *enc_ctx = ost->enc_ctx;
3293 AVCodecContext *dec_ctx = NULL;
3294 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3297 set_encoder_id(output_files[ost->file_index], ost);
3299 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3300 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3301 // which have to be filtered out to prevent leaking them to output files.
3302 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3305 ost->st->disposition = ist->st->disposition;
3307 dec_ctx = ist->dec_ctx;
3309 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
// If this is the only stream of its type, mark it as the default stream.
3311 for (j = 0; j < oc->nb_streams; j++) {
3312 AVStream *st = oc->streams[j];
3313 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3316 if (j == oc->nb_streams)
3317 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3318 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3319 ost->st->disposition = AV_DISPOSITION_DEFAULT;
// Frame-rate selection priority: user/-r, filter sink, input -r, input
// r_frame_rate, then a 25 fps fallback with a warning.
3322 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3323 if (!ost->frame_rate.num)
3324 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3325 if (ist && !ost->frame_rate.num)
3326 ost->frame_rate = ist->framerate;
3327 if (ist && !ost->frame_rate.num)
3328 ost->frame_rate = ist->st->r_frame_rate;
3329 if (ist && !ost->frame_rate.num) {
3330 ost->frame_rate = (AVRational){25, 1};
3331 av_log(NULL, AV_LOG_WARNING,
3333 "about the input framerate is available. Falling "
3334 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3335 "if you want a different framerate.\n",
3336 ost->file_index, ost->index);
3338 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3339 if (ost->enc->supported_framerates && !ost->force_fps) {
3340 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3341 ost->frame_rate = ost->enc->supported_framerates[idx];
3343 // reduce frame rate for mpeg4 to be within the spec limits
3344 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3345 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3346 ost->frame_rate.num, ost->frame_rate.den, 65535);
3350 switch (enc_ctx->codec_type) {
3351 case AVMEDIA_TYPE_AUDIO:
// Audio format/rate/layout come from the buffersink feeding the encoder.
3352 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3354 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3355 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3356 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3357 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3358 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3360 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3363 case AVMEDIA_TYPE_VIDEO:
3364 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3366 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3367 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3368 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3369 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3370 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3371 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
// Rescale previously parsed forced keyframe times into the final time base.
3373 for (j = 0; j < ost->forced_kf_count; j++)
3374 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3376 enc_ctx->time_base);
3378 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3379 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3380 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3381 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3382 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3383 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3385 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3387 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3388 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3390 enc_ctx->framerate = ost->frame_rate;
3392 ost->st->avg_frame_rate = ost->frame_rate;
// The bits_per_raw_sample hint only survives when geometry/format match the
// decoder (the guarding condition's first line is elided here).
3395 enc_ctx->width != dec_ctx->width ||
3396 enc_ctx->height != dec_ctx->height ||
3397 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3398 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3401 if (ost->top_field_first == 0) {
3402 enc_ctx->field_order = AV_FIELD_BB;
3403 } else if (ost->top_field_first == 1) {
3404 enc_ctx->field_order = AV_FIELD_TT;
3407 if (ost->forced_keyframes) {
3408 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3409 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3410 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3412 av_log(NULL, AV_LOG_ERROR,
3413 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3416 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3417 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3418 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3419 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3421 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3422 // parse it only for static kf timings
3423 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3424 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3428 case AVMEDIA_TYPE_SUBTITLE:
3429 enc_ctx->time_base = AV_TIME_BASE_Q;
3430 if (!enc_ctx->width) {
3431 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3432 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3435 case AVMEDIA_TYPE_DATA:
3442 ost->mux_timebase = enc_ctx->time_base;
3447 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3451 if (ost->encoding_needed) {
3452 AVCodec *codec = ost->enc;
3453 AVCodecContext *dec = NULL;
3456 ret = init_output_stream_encode(ost);
3460 if ((ist = get_input_stream(ost)))
3462 if (dec && dec->subtitle_header) {
3463 /* ASS code assumes this buffer is null terminated so add extra byte. */
3464 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3465 if (!ost->enc_ctx->subtitle_header)
3466 return AVERROR(ENOMEM);
3467 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3468 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3470 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3471 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3472 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3474 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3475 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3476 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3478 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3479 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3480 av_buffersink_get_format(ost->filter->filter)) {
3481 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3482 if (!ost->enc_ctx->hw_frames_ctx)
3483 return AVERROR(ENOMEM);
3485 ret = hw_device_setup_for_encode(ost);
3487 snprintf(error, error_len, "Device setup failed for "
3488 "encoder on output stream #%d:%d : %s",
3489 ost->file_index, ost->index, av_err2str(ret));
3493 if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3494 int input_props = 0, output_props = 0;
3495 AVCodecDescriptor const *input_descriptor =
3496 avcodec_descriptor_get(dec->codec_id);
3497 AVCodecDescriptor const *output_descriptor =
3498 avcodec_descriptor_get(ost->enc_ctx->codec_id);
3499 if (input_descriptor)
3500 input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3501 if (output_descriptor)
3502 output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3503 if (input_props && output_props && input_props != output_props) {
3504 snprintf(error, error_len,
3505 "Subtitle encoding currently only possible from text to text "
3506 "or bitmap to bitmap");
3507 return AVERROR_INVALIDDATA;
3511 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3512 if (ret == AVERROR_EXPERIMENTAL)
3513 abort_codec_experimental(codec, 1);
3514 snprintf(error, error_len,
3515 "Error while opening encoder for output stream #%d:%d - "
3516 "maybe incorrect parameters such as bit_rate, rate, width or height",
3517 ost->file_index, ost->index);
3520 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3521 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3522 av_buffersink_set_frame_size(ost->filter->filter,
3523 ost->enc_ctx->frame_size);
3524 assert_avoptions(ost->encoder_opts);
3525 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3526 ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3527 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3528 " It takes bits/s as argument, not kbits/s\n");
3530 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3532 av_log(NULL, AV_LOG_FATAL,
3533 "Error initializing the output stream codec context.\n");
3537 * FIXME: ost->st->codec should't be needed here anymore.
3539 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3543 if (ost->enc_ctx->nb_coded_side_data) {
3546 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3547 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3550 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3552 return AVERROR(ENOMEM);
3553 memcpy(dst_data, sd_src->data, sd_src->size);
3558 * Add global input side data. For now this is naive, and copies it
3559 * from the input stream's global side data. All side data should
3560 * really be funneled over AVFrame and libavfilter, then added back to
3561 * packet side data, and then potentially using the first packet for
3566 for (i = 0; i < ist->st->nb_side_data; i++) {
3567 AVPacketSideData *sd = &ist->st->side_data[i];
3568 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3570 return AVERROR(ENOMEM);
3571 memcpy(dst, sd->data, sd->size);
3572 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3573 av_display_rotation_set((uint32_t *)dst, 0);
3577 // copy timebase while removing common factors
3578 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3579 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3581 // copy estimated duration as a hint to the muxer
3582 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3583 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3585 ost->st->codec->codec= ost->enc_ctx->codec;
3586 } else if (ost->stream_copy) {
3587 ret = init_output_stream_streamcopy(ost);
3592 // parse user provided disposition, and update stream values
3593 if (ost->disposition) {
3594 static const AVOption opts[] = {
3595 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3596 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3597 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3598 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3599 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3600 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3601 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3602 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3603 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3604 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3605 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3606 { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3607 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3608 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3609 { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3610 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3613 static const AVClass class = {
3615 .item_name = av_default_item_name,
3617 .version = LIBAVUTIL_VERSION_INT,
3619 const AVClass *pclass = &class;
3621 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3626 /* initialize bitstream filters for the output stream
3627 * needs to be done here, because the codec id for streamcopy is not
3628 * known until now */
3629 ret = init_output_bsfs(ost);
3633 ost->initialized = 1;
3635 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
/* Warn (at most once per stream index) that a packet references a stream
 * that appeared after the initial probe; such streams are ignored by the
 * transcode loop. NOTE(review): this excerpt elides some interior lines
 * (braces and the early return after the nb_streams_warn check). */
3642 static void report_new_stream(int input_index, AVPacket *pkt)
3644 InputFile *file = input_files[input_index];
3645 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this (or a higher) stream index: do nothing. */
3647 if (pkt->stream_index < file->nb_streams_warn)
3649 av_log(file->ctx, AV_LOG_WARNING,
3650 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3651 av_get_media_type_string(st->codecpar->codec_type),
3652 input_index, pkt->stream_index,
3653 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index warned about so far. */
3654 file->nb_streams_warn = pkt->stream_index + 1;
/* One-time setup before the main transcode loop: bind complex-filtergraph
 * outputs to source streams, start framerate emulation clocks, open all
 * decoders and (non-filtered) encoders, discard unused programs, write
 * headers for stream-less outputs, and print the stream mapping.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): this excerpt elides interior lines (braces, continue/goto
 * statements, error paths); the visible lines are kept byte-identical. */
3657 static int transcode_init(void)
3659 int ret = 0, i, j, k;
3660 AVFormatContext *oc;
3663 char error[1024] = {0};
/* Pick a source_index for outputs fed by single-input filtergraphs that
 * do not have one yet, so downstream code can report a mapping. */
3665 for (i = 0; i < nb_filtergraphs; i++) {
3666 FilterGraph *fg = filtergraphs[i];
3667 for (j = 0; j < fg->nb_outputs; j++) {
3668 OutputFilter *ofilter = fg->outputs[j];
3669 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3671 if (fg->nb_inputs != 1)
3673 for (k = nb_input_streams-1; k >= 0 ; k--)
3674 if (fg->inputs[0]->ist == input_streams[k])
3676 ofilter->ost->source_index = k;
3680 /* init framerate emulation */
3681 for (i = 0; i < nb_input_files; i++) {
3682 InputFile *ifile = input_files[i];
3683 if (ifile->rate_emu)
3684 for (j = 0; j < ifile->nb_streams; j++)
3685 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3688 /* init input streams */
3689 for (i = 0; i < nb_input_streams; i++)
3690 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close any encoder contexts already associated with outputs. */
3691 for (i = 0; i < nb_output_streams; i++) {
3692 ost = output_streams[i];
3693 avcodec_close(ost->enc_ctx);
3698 /* open each encoder */
3699 for (i = 0; i < nb_output_streams; i++) {
3700 // skip streams fed from filtergraphs until we have a frame for them
3701 if (output_streams[i]->filter)
3704 ret = init_output_stream(output_streams[i], error, sizeof(error));
3709 /* discard unused programs */
3710 for (i = 0; i < nb_input_files; i++) {
3711 InputFile *ifile = input_files[i];
3712 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3713 AVProgram *p = ifile->ctx->programs[j];
3714 int discard = AVDISCARD_ALL;
/* Keep the program if any of its streams is actually consumed. */
3716 for (k = 0; k < p->nb_stream_indexes; k++)
3717 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3718 discard = AVDISCARD_DEFAULT;
3721 p->discard = discard;
3725 /* write headers for files with no streams */
3726 for (i = 0; i < nb_output_files; i++) {
3727 oc = output_files[i]->ctx;
3728 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3729 ret = check_init_output_file(output_files[i], i);
3736 /* dump the stream mapping */
3737 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3738 for (i = 0; i < nb_input_streams; i++) {
3739 ist = input_streams[i];
/* Report inputs feeding complex (non-simple) filtergraphs. */
3741 for (j = 0; j < ist->nb_filters; j++) {
3742 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3743 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3744 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3745 ist->filters[j]->name);
3746 if (nb_filtergraphs > 1)
3747 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3748 av_log(NULL, AV_LOG_INFO, "\n");
3753 for (i = 0; i < nb_output_streams; i++) {
3754 ost = output_streams[i];
3756 if (ost->attachment_filename) {
3757 /* an attached file */
3758 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3759 ost->attachment_filename, ost->file_index, ost->index);
3763 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3764 /* output from a complex graph */
3765 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3766 if (nb_filtergraphs > 1)
3767 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3769 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3770 ost->index, ost->enc ? ost->enc->name : "?");
/* Plain input->output mapping (copy or transcode). */
3774 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3775 input_streams[ost->source_index]->file_index,
3776 input_streams[ost->source_index]->st->index,
3779 if (ost->sync_ist != input_streams[ost->source_index])
3780 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3781 ost->sync_ist->file_index,
3782 ost->sync_ist->st->index);
3783 if (ost->stream_copy)
3784 av_log(NULL, AV_LOG_INFO, " (copy)");
3786 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3787 const AVCodec *out_codec = ost->enc;
3788 const char *decoder_name = "?";
3789 const char *in_codec_name = "?";
3790 const char *encoder_name = "?";
3791 const char *out_codec_name = "?";
3792 const AVCodecDescriptor *desc;
/* If the decoder's name matches the codec's canonical name it is the
 * built-in ("native") implementation; report it as such. */
3795 decoder_name = in_codec->name;
3796 desc = avcodec_descriptor_get(in_codec->id);
3798 in_codec_name = desc->name;
3799 if (!strcmp(decoder_name, in_codec_name))
3800 decoder_name = "native";
3804 encoder_name = out_codec->name;
3805 desc = avcodec_descriptor_get(out_codec->id);
3807 out_codec_name = desc->name;
3808 if (!strcmp(encoder_name, out_codec_name))
3809 encoder_name = "native";
3812 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3813 in_codec_name, decoder_name,
3814 out_codec_name, encoder_name);
3816 av_log(NULL, AV_LOG_INFO, "\n");
/* Error path: print the accumulated error message. */
3820 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Published so signal handlers / status reporting know init completed. */
3824 atomic_store(&transcode_init_done, 1);
3829 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/* A stream no longer wants output when it is finished, its file hit the
 * size limit, or it reached max_frames (which also closes its siblings).
 * NOTE(review): this excerpt elides interior lines (braces, continue,
 * and the final return statements). */
3830 static int need_output(void)
3834 for (i = 0; i < nb_output_streams; i++) {
3835 OutputStream *ost = output_streams[i];
3836 OutputFile *of = output_files[ost->file_index];
3837 AVFormatContext *os = output_files[ost->file_index]->ctx;
3839 if (ost->finished ||
3840 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3842 if (ost->frame_number >= ost->max_frames) {
/* -frames limit reached: close every stream of this output file. */
3844 for (j = 0; j < of->ctx->nb_streams; j++)
3845 close_output_stream(output_streams[of->ost_index + j]);
3856 * Select the output stream to process.
3858 * @return selected output stream, or NULL if none available
/* Picks the unfinished output stream with the smallest rescaled DTS so
 * muxing stays roughly interleaved across outputs. An uninitialized,
 * not-done stream is treated as highest priority (INT64_MIN).
 * NOTE(review): this excerpt elides interior lines (braces, the opts_min
 * update and the final return). */
3860 static OutputStream *choose_output(void)
3863 int64_t opts_min = INT64_MAX;
3864 OutputStream *ost_min = NULL;
3866 for (i = 0; i < nb_output_streams; i++) {
3867 OutputStream *ost = output_streams[i];
/* Streams with no DTS yet sort first (INT64_MIN). */
3868 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3869 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3871 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3872 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n")_
3874 if (!ost->initialized && !ost->inputs_done)
3877 if (!ost->finished && opts < opts_min) {
/* An "unavailable" (EAGAIN) stream blocks selection entirely. */
3879 ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios;
 * used around interactive command entry so typed text is/isn't shown.
 * NOTE(review): this excerpt elides the HAVE_TERMIOS_H preprocessor
 * guard and the struct termios declaration. */
3885 static void set_tty_echo(int on)
3889 if (tcgetattr(0, &tty) == 0) {
3890 if (on) tty.c_lflag |= ECHO;
3891 else tty.c_lflag &= ~ECHO;
3892 tcsetattr(0, TCSANOW, &tty);
/* Poll stdin (at most every 100ms, not in daemon mode) for interactive
 * keys: q(uit), +/- verbosity, s (QP histogram), h (hex/packet dump),
 * c/C (send/queue filter commands), d/D (debug modes), ? (help).
 * Returns AVERROR_EXIT to request shutdown, 0 otherwise.
 * NOTE(review): this excerpt elides many interior lines (braces, the
 * read_key() call, several condition lines). Lines kept byte-identical. */
3897 static int check_keyboard_interaction(int64_t cur_time)
3900 static int64_t last_time;
3901 if (received_nb_signals)
3902 return AVERROR_EXIT;
3903 /* read_key() returns 0 on EOF */
3904 if(cur_time - last_time >= 100000 && !run_as_daemon){
3906 last_time = cur_time;
3910 return AVERROR_EXIT;
3911 if (key == '+') av_log_set_level(av_log_get_level()+10);
3912 if (key == '-') av_log_set_level(av_log_get_level()-10);
3913 if (key == 's') qp_hist ^= 1;
3916 do_hex_dump = do_pkt_dump = 0;
3917 } else if(do_pkt_dump){
3921 av_log_set_level(AV_LOG_DEBUG);
3923 if (key == 'c' || key == 'C'){
3924 char buf[4096], target[64], command[256], arg[256] = {0};
3927 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one interactive line, character by character, into buf. */
3930 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3935 fprintf(stderr, "\n");
3937 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3938 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3939 target, time, command, arg);
3940 for (i = 0; i < nb_filtergraphs; i++) {
3941 FilterGraph *fg = filtergraphs[i];
/* 'c' sends to the first matching filter, 'C' to all; time >= 0 queues. */
3944 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3945 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3946 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3947 } else if (key == 'c') {
3948 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3949 ret = AVERROR_PATCHWELCOME;
3951 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3953 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3958 av_log(NULL, AV_LOG_ERROR,
3959 "Parse error, at least 3 arguments were expected, "
3960 "only %d given in string '%s'\n", n, buf);
3963 if (key == 'd' || key == 'D'){
/* 'D' cycles the debug bitmask; skip flags that would crash. */
3966 debug = input_streams[0]->st->codec->debug<<1;
3967 if(!debug) debug = 1;
3968 while(debug & (FF_DEBUG_DCT_COEFF
3970 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3972 )) //unsupported, would just crash
3979 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3984 fprintf(stderr, "\n");
3985 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3986 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug value to every decoder and encoder context. */
3988 for(i=0;i<nb_input_streams;i++) {
3989 input_streams[i]->st->codec->debug = debug;
3991 for(i=0;i<nb_output_streams;i++) {
3992 OutputStream *ost = output_streams[i];
3993 ost->enc_ctx->debug = debug;
3995 if(debug) av_log_set_level(AV_LOG_DEBUG);
3996 fprintf(stderr,"debug=%d\n", debug);
3999 fprintf(stderr, "key function\n"
4000 "? show this help\n"
4001 "+ increase verbosity\n"
4002 "- decrease verbosity\n"
4003 "c Send command to first matching filter supporting it\n"
4004 "C Send/Queue command to all matching filters\n"
4005 "D cycle through available debug modes\n"
4006 "h dump packets/hex press to cycle through the 3 states\n"
4008 "s Show QP histogram\n"
/* Demuxer thread body (one per input file when several inputs are used):
 * reads packets with av_read_frame() and forwards them to the main thread
 * through f->in_thread_queue. On EOF or error the error is recorded on
 * the queue's receive side so the main thread observes it.
 * NOTE(review): this excerpt elides interior lines (the loop construct,
 * braces, and intermediate error handling). */
4015 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable inputs to avoid stalling. */
4018 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4023 ret = av_read_frame(f->ctx, &pkt);
4025 if (ret == AVERROR(EAGAIN)) {
4030 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4033 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: retry blocking, and warn the user
 * that thread_queue_size may be too small. */
4034 if (flags && ret == AVERROR(EAGAIN)) {
4036 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4037 av_log(f->ctx, AV_LOG_WARNING,
4038 "Thread message queue blocking; consider raising the "
4039 "thread_queue_size option (current value: %d)\n",
4040 f->thread_queue_size);
4043 if (ret != AVERROR_EOF)
4044 av_log(f->ctx, AV_LOG_ERROR,
4045 "Unable to send packet to main thread: %s\n",
/* Send failed: the packet was not consumed, so unref it here. */
4047 av_packet_unref(&pkt);
4048 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop and reap the demuxer thread of input file i: signal EOF on the
 * send side, drain and unref all queued packets, join the thread, and
 * free the message queue. Safe no-op if no thread/queue exists.
 * NOTE(review): this excerpt elides braces and the early return. */
4056 static void free_input_thread(int i)
4058 InputFile *f = input_files[i];
4061 if (!f || !f->in_thread_queue)
/* Make the thread's next send fail so it exits its loop. */
4063 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain packets still in flight; each must be unreferenced. */
4064 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4065 av_packet_unref(&pkt);
4067 pthread_join(f->thread, NULL);
4069 av_thread_message_queue_free(&f->in_thread_queue);
4072 static void free_input_threads(void)
4076 for (i = 0; i < nb_input_files; i++)
4077 free_input_thread(i);
/* Start the demuxer thread for input file i: allocate its packet message
 * queue and spawn input_thread(). Non-seekable, non-lavfi inputs are put
 * in non-blocking mode so a full queue does not stall the demuxer.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): this excerpt elides braces, an early return for the
 * single-input case, and the allocation-failure check. */
4080 static int init_input_thread(int i)
4083 InputFile *f = input_files[i];
/* With a single input file the main thread demuxes directly; no thread. */
4085 if (nb_input_files == 1)
4088 if (f->ctx->pb ? !f->ctx->pb->seekable :
4089 strcmp(f->ctx->iformat->name, "lavfi"))
4090 f->non_blocking = 1;
4091 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4092 f->thread_queue_size, sizeof(AVPacket));
4096 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4097 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4098 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno; convert to AVERROR. */
4099 return AVERROR(ret);
4105 static int init_input_threads(void)
4109 for (i = 0; i < nb_input_files; i++) {
4110 ret = init_input_thread(i);
4117 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4119 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4121 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for input file f: honors -re rate emulation by
 * returning EAGAIN while a stream's DTS is ahead of wall-clock time, then
 * reads from the demuxer thread queue (multiple inputs) or directly via
 * av_read_frame (single input).
 * NOTE(review): this excerpt elides braces, the rate_emu guard and the
 * pts/now comparison line. */
4125 static int get_input_packet(InputFile *f, AVPacket *pkt)
4129 for (i = 0; i < f->nb_streams; i++) {
4130 InputStream *ist = input_streams[f->ist_index + i];
/* Compare stream DTS (in microseconds) against elapsed wall-clock. */
4131 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4132 int64_t now = av_gettime_relative() - ist->start;
4134 return AVERROR(EAGAIN);
4139 if (nb_input_files > 1)
4140 return get_input_packet_mt(f, pkt);
4142 return av_read_frame(f->ctx, pkt);
4145 static int got_eagain(void)
4148 for (i = 0; i < nb_output_streams; i++)
4149 if (output_streams[i]->unavailable)
4154 static void reset_eagain(void)
4157 for (i = 0; i < nb_input_files; i++)
4158 input_files[i]->eagain = 0;
4159 for (i = 0; i < nb_output_streams; i++)
4160 output_streams[i]->unavailable = 0;
4163 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4164 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4165 AVRational time_base)
4171 return tmp_time_base;
4174 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4177 return tmp_time_base;
/* Rewind input file ifile to its start (for -stream_loop): seeks to
 * start_time, then estimates the file's total duration so subsequent
 * loop iterations can offset packet timestamps. Audio streams take
 * precedence for the duration estimate because the last video frame's
 * length is not defined exactly.
 * NOTE(review): this excerpt elides braces, continue statements and the
 * has_audio assignment/return paths. */
4183 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4186 AVCodecContext *avctx;
4187 int i, ret, has_audio = 0;
4188 int64_t duration = 0;
4190 ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: detect whether any audio stream produced samples. */
4194 for (i = 0; i < ifile->nb_streams; i++) {
4195 ist = input_streams[ifile->ist_index + i];
4196 avctx = ist->dec_ctx;
4198 /* duration is the length of the last frame in a stream
4199 * when audio stream is present we don't care about
4200 * last video frame length because it's not defined exactly */
4201 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: compute per-stream last-frame duration and fold it into
 * the file-level duration via duration_max(). */
4205 for (i = 0; i < ifile->nb_streams; i++) {
4206 ist = input_streams[ifile->ist_index + i];
4207 avctx = ist->dec_ctx;
4210 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4211 AVRational sample_rate = {1, avctx->sample_rate};
4213 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: one frame's worth of time from the configured or average rate. */
4218 if (ist->framerate.num) {
4219 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4220 } else if (ist->st->avg_frame_rate.num) {
4221 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4226 if (!ifile->duration)
4227 ifile->time_base = ist->st->time_base;
4228 /* the total duration of the stream, max_pts - min_pts is
4229 * the duration of the stream without the last frame */
4230 duration += ist->max_pts - ist->min_pts;
4231 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Decrement the remaining loop count when it is finite (> 0). */
4235 if (ifile->loop > 0)
4243 * - 0 -- one packet was read and processed
4244 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4245 * this function should be called again
4246 * - AVERROR_EOF -- this function should not be called again
/* Read one packet from input file file_index, fix up its timestamps
 * (start-time offset, wrap correction, discontinuity handling, ts_scale,
 * loop-duration offset), attach first-packet side data, and hand it to
 * process_input_packet(). NOTE(review): this excerpt elides many interior
 * lines (braces, gotos, error checks); visible lines kept byte-identical. */
4248 static int process_input(int file_index)
4250 InputFile *ifile = input_files[file_index];
4251 AVFormatContext *is;
4254 int ret, thread_ret, i, j;
4259 ret = get_input_packet(ifile, &pkt);
4261 if (ret == AVERROR(EAGAIN)) {
/* EOF/error with -stream_loop active: flush decoders, stop the demuxer
 * thread, seek back to the start, restart the thread and retry. */
4265 if (ret < 0 && ifile->loop) {
4266 AVCodecContext *avctx;
4267 for (i = 0; i < ifile->nb_streams; i++) {
4268 ist = input_streams[ifile->ist_index + i];
4269 avctx = ist->dec_ctx;
4270 if (ist->decoding_needed) {
4271 ret = process_input_packet(ist, NULL, 1);
4274 avcodec_flush_buffers(avctx);
4278 free_input_thread(file_index);
4280 ret = seek_to_start(ifile, is);
4282 thread_ret = init_input_thread(file_index);
4287 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4289 ret = get_input_packet(ifile, &pkt);
4290 if (ret == AVERROR(EAGAIN)) {
4296 if (ret != AVERROR_EOF) {
4297 print_error(is->url, ret);
/* True EOF: flush each decoder once and finish outputs that do not go
 * through libavfilter. */
4302 for (i = 0; i < ifile->nb_streams; i++) {
4303 ist = input_streams[ifile->ist_index + i];
4304 if (ist->decoding_needed) {
4305 ret = process_input_packet(ist, NULL, 0);
4310 /* mark all outputs that don't go through lavfi as finished */
4311 for (j = 0; j < nb_output_streams; j++) {
4312 OutputStream *ost = output_streams[j];
4314 if (ost->source_index == ifile->ist_index + i &&
4315 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4316 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller tries other input files. */
4320 ifile->eof_reached = 1;
4321 return AVERROR(EAGAIN);
4327 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4328 is->streams[pkt.stream_index]);
4330 /* the following test is needed in case new streams appear
4331 dynamically in stream : we ignore them */
4332 if (pkt.stream_index >= ifile->nb_streams) {
4333 report_new_stream(file_index, &pkt);
4334 goto discard_packet;
4337 ist = input_streams[ifile->ist_index + pkt.stream_index];
4339 ist->data_size += pkt.size;
4343 goto discard_packet;
4345 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4346 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
/* Verbose debug dump of raw demuxer timestamps before any correction. */
4351 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4352 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4353 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4354 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4355 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4356 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4357 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4358 av_ts2str(input_files[ist->file_index]->ts_offset),
4359 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64 pts_wrap_bits. */
4362 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4363 int64_t stime, stime2;
4364 // Correcting starttime based on the enabled streams
4365 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4366 // so we instead do it here as part of discontinuity handling
4367 if ( ist->next_dts == AV_NOPTS_VALUE
4368 && ifile->ts_offset == -is->start_time
4369 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4370 int64_t new_start_time = INT64_MAX;
4371 for (i=0; i<is->nb_streams; i++) {
4372 AVStream *st = is->streams[i];
4373 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4375 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4377 if (new_start_time > is->start_time) {
4378 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4379 ifile->ts_offset = -new_start_time;
4383 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4384 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4385 ist->wrap_correction_done = 1;
/* Undo an apparent wrap if the timestamp lies past the halfway point. */
4387 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4388 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4389 ist->wrap_correction_done = 0;
4391 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4392 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4393 ist->wrap_correction_done = 0;
4397 /* add the stream-global side data to the first packet */
4398 if (ist->nb_packets == 1) {
4399 for (i = 0; i < ist->st->nb_side_data; i++) {
4400 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Display matrix is handled via autorotate elsewhere; skip it here. */
4403 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4406 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4409 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4413 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's start-time offset, then the -itsscale factor. */
4417 if (pkt.dts != AV_NOPTS_VALUE)
4418 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4419 if (pkt.pts != AV_NOPTS_VALUE)
4420 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4422 if (pkt.pts != AV_NOPTS_VALUE)
4423 pkt.pts *= ist->ts_scale;
4424 if (pkt.dts != AV_NOPTS_VALUE)
4425 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity: first packet of a stream far from the
 * file's last seen timestamp in a TS_DISCONT format. */
4427 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4428 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4429 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4430 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4431 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4432 int64_t delta = pkt_dts - ifile->last_ts;
4433 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4434 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4435 ifile->ts_offset -= delta;
4436 av_log(NULL, AV_LOG_DEBUG,
4437 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4438 delta, ifile->ts_offset);
4439 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4440 if (pkt.pts != AV_NOPTS_VALUE)
4441 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration. */
4445 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4446 if (pkt.pts != AV_NOPTS_VALUE) {
4447 pkt.pts += duration;
4448 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4449 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4452 if (pkt.dts != AV_NOPTS_VALUE)
4453 pkt.dts += duration;
/* Intra-stream discontinuity: DTS far from the predicted next_dts. */
4455 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4456 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4457 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4458 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4460 int64_t delta = pkt_dts - ist->next_dts;
4461 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4462 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4463 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4464 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4465 ifile->ts_offset -= delta;
4466 av_log(NULL, AV_LOG_DEBUG,
4467 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4468 delta, ifile->ts_offset);
4469 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4470 if (pkt.pts != AV_NOPTS_VALUE)
4471 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Non-DISCONT formats: drop timestamps beyond the error threshold. */
4474 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4475 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4476 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4477 pkt.dts = AV_NOPTS_VALUE;
4479 if (pkt.pts != AV_NOPTS_VALUE){
4480 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4481 delta = pkt_pts - ist->next_dts;
4482 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4483 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4484 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4485 pkt.pts = AV_NOPTS_VALUE;
4491 if (pkt.dts != AV_NOPTS_VALUE)
4492 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Verbose debug dump of the corrected timestamps. */
4495 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4496 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4497 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4498 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4499 av_ts2str(input_files[ist->file_index]->ts_offset),
4500 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4503 sub2video_heartbeat(ist, pkt.pts);
4505 process_input_packet(ist, &pkt, 0);
4508 av_packet_unref(&pkt);
4514 * Perform a step of transcoding for the specified filter graph.
4516 * @param[in] graph filter graph to consider
4517 * @param[out] best_ist input stream where a frame would allow to continue
4518 * @return 0 for success, <0 for error
/* Drives the graph via avfilter_graph_request_oldest(): reaps produced
 * frames, closes outputs on EOF, and on EAGAIN selects the input whose
 * buffersrc reported the most failed requests as *best_ist so the caller
 * knows where to feed next.
 * NOTE(review): this excerpt elides braces, the *best_ist assignments
 * and several return statements. */
4520 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4523 int nb_requests, nb_requests_max = 0;
4524 InputFilter *ifilter;
4528 ret = avfilter_graph_request_oldest(graph->graph);
4530 return reap_filters(0);
4532 if (ret == AVERROR_EOF) {
/* Graph finished: flush filters and close every fed output stream. */
4533 ret = reap_filters(1);
4534 for (i = 0; i < graph->nb_outputs; i++)
4535 close_output_stream(graph->outputs[i]->ost);
4538 if (ret != AVERROR(EAGAIN))
/* EAGAIN: find the hungriest input among those not stalled or at EOF. */
4541 for (i = 0; i < graph->nb_inputs; i++) {
4542 ifilter = graph->inputs[i];
4544 if (input_files[ist->file_index]->eagain ||
4545 input_files[ist->file_index]->eof_reached)
4547 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4548 if (nb_requests > nb_requests_max) {
4549 nb_requests_max = nb_requests;
/* No feedable input found: mark all graph outputs unavailable. */
4555 for (i = 0; i < graph->nb_outputs; i++)
4556 graph->outputs[i]->ost->unavailable = 1;
4562 * Run a single step of transcoding.
4564 * @return 0 for success, <0 for error
/* Chooses an output stream, (re)configures and initializes its filter
 * graph / encoder if needed, determines which input stream to read from,
 * reads/processes one packet, then reaps any frames the filters produced.
 * NOTE(review): this excerpt elides braces, return statements and some
 * condition lines. */
4566 static int transcode_step(void)
4569 InputStream *ist = NULL;
4572 ost = choose_output();
4579 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter graph exists but is not configured yet: configure it once all
 * of its inputs have known formats. */
4583 if (ost->filter && !ost->filter->graph->graph) {
4584 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4585 ret = configure_filtergraph(ost->filter->graph);
4587 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4593 if (ost->filter && ost->filter->graph->graph) {
/* Graph configured: lazily init the encoder, then ask the graph which
 * input stream it wants fed (via transcode_from_filter). */
4594 if (!ost->initialized) {
4595 char error[1024] = {0};
4596 ret = init_output_stream(ost, error, sizeof(error));
4598 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4599 ost->file_index, ost->index, error);
4603 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4607 } else if (ost->filter) {
/* Unconfigured graph: look for an input that can still deliver data;
 * if none, the output's inputs are done. */
4609 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4610 InputFilter *ifilter = ost->filter->graph->inputs[i];
4611 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4617 ost->inputs_done = 1;
/* No filter: read directly from the mapped source stream. */
4621 av_assert0(ost->source_index >= 0);
4622 ist = input_streams[ost->source_index];
4625 ret = process_input(ist->file_index);
4626 if (ret == AVERROR(EAGAIN)) {
4627 if (input_files[ist->file_index]->eagain)
4628 ost->unavailable = 1;
4633 return ret == AVERROR_EOF ? 0 : ret;
4635 return reap_filters(0);
4639 * The following code is the main loop of the file converter
/* Top-level conversion driver: runs transcode_init(), loops over
 * transcode_step() until no output is needed or a signal arrives,
 * flushes decoders, writes trailers, closes encoders/decoders, releases
 * hardware contexts and frees per-output-stream resources.
 * NOTE(review): this excerpt elides braces, gotos and several
 * intermediate lines; visible lines are kept byte-identical. */
4641 static int transcode(void)
4644 AVFormatContext *os;
4647 int64_t timer_start;
4648 int64_t total_packets_written = 0;
4650 ret = transcode_init();
4654 if (stdin_interaction) {
4655 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4658 timer_start = av_gettime_relative();
4661 if ((ret = init_input_threads()) < 0)
/* Main loop: runs until SIGTERM/SIGINT or until no stream needs output. */
4665 while (!received_sigterm) {
4666 int64_t cur_time= av_gettime_relative();
4668 /* if 'q' pressed, exits */
4669 if (stdin_interaction)
4670 if (check_keyboard_interaction(cur_time) < 0)
4673 /* check if there's any stream where output is still needed */
4674 if (!need_output()) {
4675 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4679 ret = transcode_step();
4680 if (ret < 0 && ret != AVERROR_EOF) {
4681 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4685 /* dump report by using the output first video and audio streams */
4686 print_report(0, timer_start, cur_time);
4689 free_input_threads();
4692 /* at the end of stream, we must flush the decoder buffers */
4693 for (i = 0; i < nb_input_streams; i++) {
4694 ist = input_streams[i];
4695 if (!input_files[ist->file_index]->eof_reached) {
4696 process_input_packet(ist, NULL, 0);
4703 /* write the trailer if needed and close file */
4704 for (i = 0; i < nb_output_files; i++) {
4705 os = output_files[i]->ctx;
4706 if (!output_files[i]->header_written) {
4707 av_log(NULL, AV_LOG_ERROR,
4708 "Nothing was written into output file %d (%s), because "
4709 "at least one of its streams received no packets.\n",
4713 if ((ret = av_write_trailer(os)) < 0) {
4714 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4720 /* dump report by using the first video and audio streams */
4721 print_report(1, timer_start, av_gettime_relative());
4723 /* close each encoder */
4724 for (i = 0; i < nb_output_streams; i++) {
4725 ost = output_streams[i];
4726 if (ost->encoding_needed) {
4727 av_freep(&ost->enc_ctx->stats_in);
4729 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4732 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4733 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4737 /* close each decoder */
4738 for (i = 0; i < nb_input_streams; i++) {
4739 ist = input_streams[i];
4740 if (ist->decoding_needed) {
4741 avcodec_close(ist->dec_ctx);
4742 if (ist->hwaccel_uninit)
4743 ist->hwaccel_uninit(ist->dec_ctx);
4747 av_buffer_unref(&hw_device_ctx);
4748 hw_device_free_all();
/* Cleanup path (also reached on error): stop threads, free per-stream
 * state (2-pass logfile, keyframe lists, option dictionaries). */
4755 free_input_threads();
4758 if (output_streams) {
4759 for (i = 0; i < nb_output_streams; i++) {
4760 ost = output_streams[i];
4763 if (fclose(ost->logfile))
4764 av_log(NULL, AV_LOG_ERROR,
4765 "Error closing logfile, loss of information possible: %s\n",
4766 av_err2str(AVERROR(errno)));
4767 ost->logfile = NULL;
4769 av_freep(&ost->forced_kf_pts);
4770 av_freep(&ost->apad);
4771 av_freep(&ost->disposition);
4772 av_dict_free(&ost->encoder_opts);
4773 av_dict_free(&ost->sws_dict);
4774 av_dict_free(&ost->swr_opts);
4775 av_dict_free(&ost->resample_opts);
/*
 * Snapshot the current wall-clock, user-CPU and system-CPU time (all in
 * microseconds) for the -benchmark option. real_usec is always filled from
 * av_gettime_relative(); CPU times come from getrusage() or, on Windows,
 * GetProcessTimes(), and are zero on platforms with neither.
 * NOTE(review): the opening #if of this platform ladder (presumably
 * HAVE_GETRUSAGE) falls in elided lines — confirm against the full file.
 */
4782 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4784 BenchmarkTimeStamps time_stamps = { av_gettime_relative() }; /* real_usec */
4786 struct rusage rusage;
4788 getrusage(RUSAGE_SELF, &rusage);
/* tv_sec/tv_usec pairs folded into a single microsecond count; 1000000LL
 * keeps the multiplication in 64-bit. */
4789 time_stamps.user_usec =
4790 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4791 time_stamps.sys_usec =
4792 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4793 #elif HAVE_GETPROCESSTIMES
4795 FILETIME c, e, k, u;
4796 proc = GetCurrentProcess();
4797 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIMEs are 100-ns units split across two 32-bit halves; reassemble
 * and divide by 10 to get microseconds (u = user time, k = kernel time). */
4798 time_stamps.user_usec =
4799 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4800 time_stamps.sys_usec =
4801 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
/* Fallback: CPU times unavailable on this platform. */
4803 time_stamps.user_usec = time_stamps.sys_usec = 0;
/*
 * Return this process's peak memory usage in bytes, for the -benchmark
 * report. Uses getrusage() where ru_maxrss exists (reported in KiB, hence
 * the *1024), or GetProcessMemoryInfo() on Windows. The no-API fallback
 * branch lies in elided lines.
 */
4808 static int64_t getmaxrss(void)
4810 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4811 struct rusage rusage;
4812 getrusage(RUSAGE_SELF, &rusage);
4813 return (int64_t)rusage.ru_maxrss * 1024;
4814 #elif HAVE_GETPROCESSMEMORYINFO
4816 PROCESS_MEMORY_COUNTERS memcounters;
4817 proc = GetCurrentProcess();
4818 memcounters.cb = sizeof(memcounters);
4819 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
/* NOTE(review): this is peak *pagefile* usage, not peak working set —
 * intentional in upstream FFmpeg, but not strictly an RSS figure. */
4820 return memcounters.PeakPagefileUsage;
/*
 * No-op av_log callback: installed in main() when the first argument is
 * "-d" to suppress all libav* console logging. (Body is in elided lines;
 * presumably empty — confirm against the full file.)
 */
4826 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: set up logging and cleanup handlers, parse the
 * command line into input/output files, run transcode(), and print
 * benchmark/error statistics before exiting.
 *
 * NOTE(review): lines between the numbered statements below (error exits,
 * brace closures, the "-d" branch body) are elided in this extract.
 */
4830 int main(int argc, char **argv)
4833 BenchmarkTimeStamps ti;
/* Ensure ffmpeg_cleanup runs on every exit path (exit_program below). */
4837 register_exit(ffmpeg_cleanup);
4839 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4841 av_log_set_flags(AV_LOG_SKIP_REPEATED);
/* Apply -loglevel/-v early so option parsing itself logs correctly. */
4842 parse_loglevel(argc, argv, options);
/* "-d": silence libav* logging entirely (rest of the branch is elided). */
4844 if(argc>1 && !strcmp(argv[1], "-d")){
4846 av_log_set_callback(log_callback_null);
/* Register devices and enable network protocols before opening files. */
4852 avdevice_register_all();
4854 avformat_network_init();
4856 show_banner(argc, argv, options);
4858 /* parse options and open all input/output files */
4859 ret = ffmpeg_parse_options(argc, argv);
/* Invoked with no files at all: point the user at the help. */
4863 if (nb_output_files <= 0 && nb_input_files == 0) {
4865 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4869 /* file converter / grab */
4870 if (nb_output_files <= 0) {
4871 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4875 // if (nb_input_files == 0) {
4876 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Scan outputs for non-RTP muxers (the conditional's body is elided —
 * presumably related to the want_sdp flag; confirm in the full file). */
4880 for (i = 0; i < nb_output_files; i++) {
4881 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
/* Record benchmark baselines, then run the whole conversion. */
4885 current_time = ti = get_benchmark_time_stamps();
4886 if (transcode() < 0)
/* -benchmark output: deltas between the post- and pre-transcode stamps. */
4889 int64_t utime, stime, rtime;
4890 current_time = get_benchmark_time_stamps();
4891 utime = current_time.user_usec - ti.user_usec;
4892 stime = current_time.sys_usec - ti.sys_usec;
4893 rtime = current_time.real_usec - ti.real_usec;
4894 av_log(NULL, AV_LOG_INFO,
4895 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4896 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4898 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4899 decode_error_stat[0], decode_error_stat[1]);
/* Fail if the fraction of erroneous frames exceeds -max_error_rate. */
4900 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* Exit 255 when interrupted by a signal, otherwise the accumulated code;
 * exit_program() also triggers the registered ffmpeg_cleanup. */
4903 exit_program(received_nb_signals ? 255 : main_return_code);
4904 return main_return_code;