2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity consumed by cmdutils (banner/version reporting). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Handle for the -vstats log; opened lazily on first do_video_stats() call. */
112 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions
 * (array initializer elided in this view). */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
/* Global counters of duplicated/dropped frames, updated by the video-sync
 * logic in do_video_out(). */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
/* NOTE(review): presumably [0]=tolerated, [1]=fatal decode-error counters —
 * usage not visible here, confirm against the decode path. */
130 static int64_t decode_error_stat[2];
/* Baseline for update_benchmark(); compared against an int64_t getutime()
 * value at its use site — NOTE(review): plain int may truncate, consider
 * int64_t. */
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets; allocated on demand in
 * do_subtitle_out(), released in ffmpeg_cleanup(). */
135 static uint8_t *subtitle_out;
/* Global input/output stream and file tables (non-static: shared with the
 * rest of the ffmpeg tool). */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe(). */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare a blank RGB32 canvas in ist->sub2video.frame for rendering
 * subtitle rectangles. Size comes from the decoder when known, otherwise
 * from the configured sub2video dimensions. Returns <0 (AVERROR) on
 * allocation failure; success path elided in this view. */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* request 32-byte buffer alignment */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* all-zero RGB32 == fully transparent black canvas */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one bitmap (PAL8) subtitle rectangle onto the RGB32 canvas:
 * each source byte indexes the rectangle's palette to produce a 32-bit
 * pixel. Non-bitmap or out-of-bounds rectangles are warned about and
 * skipped (early-return lines elided in this view). */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
/* data[1] holds the palette for PAL8 subtitle bitmaps */
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Send the current sub2video frame (stamped with the given pts) into every
 * filter fed by this input stream, keeping our own reference so the frame
 * can be re-sent on later heartbeats. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* the canvas must have been allocated by sub2video_get_blank_frame() */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into
 * the filtergraph. With sub == NULL (heartbeat/flush path, per callers
 * below) the elided branch reuses the stored end_pts as the new pts. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* display times are in ms; convert via AV_TIME_BASE to stream time base */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* sub == NULL case: repeat/clear using the previous end timestamp */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Keep sub2video streams alive: whenever a packet is read from a file,
 * re-send the current subtitle canvas to sibling sub2video streams so
 * filters (e.g. overlay) waiting on a subtitle input do not stall. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* skip streams that are not sub2video sources */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* canvas expired (or never drawn): regenerate via sub2video_update(NULL) */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream handling for a sub2video input: emit a final canvas update
 * if one is still pending, then signal EOF to every attached buffer source
 * by pushing a NULL frame. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Restore the saved terminal state; kept minimal so it can be called from
 * a signal handler ("sigsafe"). The trailing empty av_log flushes/quiets
 * logging output. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping shared between the signal handlers below and the
 * main transcode loop. NOTE(review): volatile int is the historical idiom
 * here; volatile sig_atomic_t would be the strictly portable type for
 * values written from signal handlers — confirm before changing. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Async signal handler: record which signal arrived and how many times.
 * After more than 3 signals, write a message directly to stderr with
 * write(2) (async-signal-safe, unlike fprintf) and hard-exit (exit call
 * elided in this view). */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * POSIX-style signal path used elsewhere (SIGINT/SIGTERM via
 * sigterm_handler). */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
/* busy-wait until the main thread flips ffmpeg_exited */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* (interior of term_init — opening lines elided in this view)
 * Put the controlling terminal into raw-ish mode so single keypresses can
 * be read without echo, then install the signal handlers. */
371 if (tcgetattr (0, &tty) == 0) {
/* disable input translation/flow control, keep output post-processing,
 * turn off echo and canonical (line-buffered) mode */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* CPU time limit exceeded — treat like a termination request */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* POSIX path: poll stdin with select(); Windows path: peek the console or
 * the pipe ffmpeg was launched with. Returns the key read, or a sentinel
 * when no input is available (return lines elided in this view). */
401 static int read_key(void)
413 n = select(1, &rfds, NULL, NULL, &tv);
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is a pipe rather than a console */
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once a signal has been
 * received (after init, a single signal suffices; during init two are
 * required since transcode_init_done is still 0... comparison is against
 * that flag). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
/* shared interrupt callback handed to all AVFormat/AVIO contexts */
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down every global structure before exit: filtergraphs, output
 * files/streams, input threads/files/streams, the vstats file and the
 * network layer. 'ret' is the pending process exit status (used only for
 * the final log message here). */
459 static void ffmpeg_cleanup(int ret)
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* --- filtergraphs: free graph, then per-input/output names and arrays --- */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* --- output files: close the muxer (unless AVFMT_NOFILE) and free --- */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* --- output streams: bitstream filter chain, cached frames, parser,
 *     keyframe expression state, encoder context --- */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
/* stop reader threads before closing the inputs they feed on */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
/* fclose() flushes buffered vstats output; report if that fails */
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from dictionary *a every key that also appears in b (case-
 * sensitive match). Used to strip already-consumed options before
 * assert_avoptions() reports leftovers. */
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b */
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort with a fatal log if any option in m was not consumed by the
 * codec/format it was handed to (i.e. the user supplied an unknown
 * option). */
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort because an experimental codec was selected without -strict
 * experimental; 'encoder' distinguishes encode vs decode wording (body
 * elided in this view). */
604 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log the CPU time spent since the previous call,
 * labelled by the printf-style fmt arguments; a NULL fmt only resets the
 * reference timestamp (reset lines elided in this view). */
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: 'this_stream' flags are OR-ed into
 * ost itself, 'others' into all remaining streams. Used on fatal mux
 * errors to stop the whole pipeline. */
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final per-packet muxing step: copy encoder extradata to the stream,
 * apply sync-method pts handling, max_frames limiting, quality side-data
 * extraction, bitstream filters, DTS monotonicity fixing, statistics, and
 * finally hand the packet to av_interleaved_write_frame(). Takes
 * ownership of pkt's payload (unreferenced at the end). */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* stream-copy uses the muxer-side codec context, encode uses enc_ctx */
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* propagate encoder extradata to the stream if it has none yet */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative async: strip timestamps entirely */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
/* pull encoder quality/error stats out of packet side data */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: packet duration is dictated by the frame rate */
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
/* nothing left to mux after filtering */
697 if (pkt->size == 0 && pkt->side_data_elems == 0)
700 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts must never exceed pts; rebuild a plausible dts when it does */
701 if (pkt->dts != AV_NOPTS_VALUE &&
702 pkt->pts != AV_NOPTS_VALUE &&
703 pkt->dts > pkt->pts) {
704 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
706 ost->file_index, ost->st->index);
/* median of {pts, dts, last_mux_dts+1}: sum minus min minus max */
708 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
709 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
710 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
713 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
714 pkt->dts != AV_NOPTS_VALUE &&
715 !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
716 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing dts (hence the +1) */
717 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
718 if (pkt->dts < max) {
719 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
720 av_log(s, loglevel, "Non-monotonous DTS in output stream "
721 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
722 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
724 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
727 av_log(s, loglevel, "changing to %"PRId64". This may result "
728 "in incorrect timestamps in the output file.\n",
/* keep pts >= dts after clamping dts up to 'max' */
730 if(pkt->pts >= pkt->dts)
731 pkt->pts = FFMAX(pkt->pts, max);
736 ost->last_mux_dts = pkt->dts;
738 ost->data_size += pkt->size;
739 ost->packets_written++;
741 pkt->stream_index = ost->index;
744 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
745 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
746 av_get_media_type_string(ost->enc_ctx->codec_type),
747 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
748 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
753 ret = av_interleaved_write_frame(s, pkt);
/* mux failure: remember a non-zero exit code and shut everything down */
755 print_error("av_interleaved_write_frame()", ret);
756 main_return_code = 1;
757 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
759 av_packet_unref(pkt);
/* Mark this stream's encoder finished and (in the elided conditional)
 * shorten the file's recording time to the stream's current end, so other
 * streams stop at the same point. */
762 static void close_output_stream(OutputStream *ost)
764 OutputFile *of = output_files[ost->file_index];
766 ost->finished |= ENCODER_FINISHED;
768 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
769 of->recording_time = FFMIN(of->recording_time, end);
/* Return 0 (and close the stream) once the stream has reached the output
 * file's -t recording limit; non-zero means keep encoding (the success
 * return is elided in this view). */
773 static int check_recording_time(OutputStream *ost)
775 OutputFile *of = output_files[ost->file_index];
777 if (of->recording_time != INT64_MAX &&
/* compare stream position (encoder time base) against limit (AV_TIME_BASE) */
778 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
779 AV_TIME_BASE_Q) >= 0) {
780 close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet. Maintains
 * ost->sync_opts as a running sample counter so frames without timestamps
 * get consecutive pts values. */
786 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
789 AVCodecContext *enc = ost->enc_ctx;
793 av_init_packet(&pkt);
/* -t limit reached: drop the frame */
797 if (!check_recording_time(ost))
800 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
801 frame->pts = ost->sync_opts;
/* next frame starts right after this one's samples */
802 ost->sync_opts = frame->pts + frame->nb_samples;
803 ost->samples_encoded += frame->nb_samples;
804 ost->frames_encoded++;
806 av_assert0(pkt.size || !pkt.data);
807 update_benchmark(NULL);
809 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
810 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
811 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
812 enc->time_base.num, enc->time_base.den);
815 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
816 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
819 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* convert packet timestamps from encoder to muxer time base */
822 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
825 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
826 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
827 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
828 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
831 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux the result. DVB subtitles are encoded
 * twice (one packet to draw, one to clear — see the note below); other
 * codecs once. Uses the global subtitle_out scratch buffer. */
835 static void do_subtitle_out(AVFormatContext *s,
840 int subtitle_out_max_size = 1024 * 1024;
841 int subtitle_out_size, nb, i;
846 if (sub->pts == AV_NOPTS_VALUE) {
847 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* lazy one-time allocation of the shared output buffer */
856 subtitle_out = av_malloc(subtitle_out_max_size);
858 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
863 /* Note: DVB subtitle need one packet to draw them and one other
864 packet to clear them */
865 /* XXX: signal it in the codec context ? */
866 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
871 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
873 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
874 pts -= output_files[ost->file_index]->start_time;
875 for (i = 0; i < nb; i++) {
/* the encoder may clobber num_rects (e.g. for the DVB "clear" pass);
 * remember it so the caller's AVSubtitle stays intact */
876 unsigned save_num_rects = sub->num_rects;
878 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
879 if (!check_recording_time(ost))
883 // start_display_time is required to be 0
884 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
885 sub->end_display_time -= sub->start_display_time;
886 sub->start_display_time = 0;
890 ost->frames_encoded++;
892 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
893 subtitle_out_max_size, sub);
895 sub->num_rects = save_num_rects;
896 if (subtitle_out_size < 0) {
897 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
901 av_init_packet(&pkt);
902 pkt.data = subtitle_out;
903 pkt.size = subtitle_out_size;
904 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
905 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
906 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
907 /* XXX: the pts correction is handled here. Maybe handling
908 it in the codec would be better */
/* 90 kHz ticks per ms: shift the draw/clear packets appropriately */
910 pkt.pts += 90 * sub->start_display_time;
912 pkt.pts += 90 * sub->end_display_time;
915 write_frame(s, &pkt, ost);
/* Video sync + encode: decide how many times the incoming frame (and the
 * previously buffered one) must be emitted or dropped to satisfy the
 * selected -vsync mode, then encode each emitted frame, handling forced
 * keyframes, interlacing flags and two-pass logging. next_picture == NULL
 * is the flush case. */
919 static void do_video_out(AVFormatContext *s,
921 AVFrame *next_picture,
924 int ret, format_video_sync;
926 AVCodecContext *enc = ost->enc_ctx;
927 AVCodecContext *mux_enc = ost->st->codec;
928 int nb_frames, nb0_frames, i;
929 double delta, delta0;
932 InputStream *ist = NULL;
933 AVFilterContext *filter = ost->filter->filter;
935 if (ost->source_index >= 0)
936 ist = input_streams[ost->source_index];
/* derive the nominal frame duration (in encoder ticks) from the filter
 * output frame rate, the output -r, or the input packet duration */
938 if (filter->inputs[0]->frame_rate.num > 0 &&
939 filter->inputs[0]->frame_rate.den > 0)
940 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
942 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
943 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
945 if (!ost->filters_script &&
949 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
950 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* flush path: estimate repeat count from the recent nb0 history */
955 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
956 ost->last_nb0_frames[1],
957 ost->last_nb0_frames[2]);
959 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
960 delta = delta0 + duration;
962 /* by default, we output a single frame */
963 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
966 format_video_sync = video_sync_method;
967 if (format_video_sync == VSYNC_AUTO) {
968 if(!strcmp(s->oformat->name, "avi")) {
969 format_video_sync = VSYNC_VFR;
971 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
973 && format_video_sync == VSYNC_CFR
974 && input_files[ist->file_index]->ctx->nb_streams == 1
975 && input_files[ist->file_index]->input_ts_offset == 0) {
976 format_video_sync = VSYNC_VSCFR;
978 if (format_video_sync == VSYNC_CFR && copy_ts) {
979 format_video_sync = VSYNC_VSCFR;
982 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* negative drift: the frame is in the past relative to the output clock */
986 format_video_sync != VSYNC_PASSTHROUGH &&
987 format_video_sync != VSYNC_DROP) {
989 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
991 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
992 sync_ipts = ost->sync_opts;
997 switch (format_video_sync) {
999 if (ost->frame_number == 0 && delta0 >= 0.5) {
1000 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1003 ost->sync_opts = lrint(sync_ipts);
1006 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1007 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1009 } else if (delta < -1.1)
1011 else if (delta > 1.1) {
/* CFR: large positive drift means the frame must be duplicated */
1012 nb_frames = lrintf(delta);
1014 nb0_frames = lrintf(delta0 - 0.6);
1020 else if (delta > 0.6)
1021 ost->sync_opts = lrint(sync_ipts);
1024 case VSYNC_PASSTHROUGH:
1025 ost->sync_opts = lrint(sync_ipts);
1032 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1033 nb0_frames = FFMIN(nb0_frames, nb_frames);
/* shift the nb0 history window and record the new value */
1035 memmove(ost->last_nb0_frames + 1,
1036 ost->last_nb0_frames,
1037 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1038 ost->last_nb0_frames[0] = nb0_frames;
1040 if (nb0_frames == 0 && ost->last_dropped) {
1042 av_log(NULL, AV_LOG_VERBOSE,
1043 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1044 ost->frame_number, ost->st->index, ost->last_frame->pts);
1046 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1047 if (nb_frames > dts_error_threshold * 30) {
1048 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1052 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1053 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1055 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1057 /* duplicates frame if needed */
1058 for (i = 0; i < nb_frames; i++) {
1059 AVFrame *in_picture;
1060 av_init_packet(&pkt);
/* first nb0_frames iterations re-encode the buffered previous frame */
1064 if (i < nb0_frames && ost->last_frame) {
1065 in_picture = ost->last_frame;
1067 in_picture = next_picture;
1072 in_picture->pts = ost->sync_opts;
1075 if (!check_recording_time(ost))
1077 if (ost->frame_number >= ost->max_frames)
1081 #if FF_API_LAVF_FMT_RAWPICTURE
1082 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1083 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1084 /* raw pictures are written as AVPicture structure to
1085 avoid any copies. We support temporarily the older
1087 if (in_picture->interlaced_frame)
1088 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1090 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1091 pkt.data = (uint8_t *)in_picture;
1092 pkt.size = sizeof(AVPicture);
1093 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1094 pkt.flags |= AV_PKT_FLAG_KEY;
1096 write_frame(s, &pkt, ost);
1100 int got_packet, forced_keyframe = 0;
1103 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1104 ost->top_field_first >= 0)
1105 in_picture->top_field_first = !!ost->top_field_first;
1107 if (in_picture->interlaced_frame) {
1108 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1109 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1111 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1113 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1115 in_picture->quality = enc->global_quality;
/* let the encoder choose the picture type unless forced below */
1116 in_picture->pict_type = 0;
1118 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1119 in_picture->pts * av_q2d(enc->time_base) : NAN;
/* forced keyframes: explicit -force_key_frames timestamp list ... */
1120 if (ost->forced_kf_index < ost->forced_kf_count &&
1121 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1122 ost->forced_kf_index++;
1123 forced_keyframe = 1;
/* ... or an expr: evaluated against the FKF_* constants each frame */
1124 } else if (ost->forced_keyframes_pexpr) {
1126 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1127 res = av_expr_eval(ost->forced_keyframes_pexpr,
1128 ost->forced_keyframes_expr_const_values, NULL);
1129 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1130 ost->forced_keyframes_expr_const_values[FKF_N],
1131 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1132 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1133 ost->forced_keyframes_expr_const_values[FKF_T],
1134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1137 forced_keyframe = 1;
1138 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1139 ost->forced_keyframes_expr_const_values[FKF_N];
1140 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1141 ost->forced_keyframes_expr_const_values[FKF_T];
1142 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1145 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
/* ... or "source": mirror keyframes of the input */
1146 } else if ( ost->forced_keyframes
1147 && !strncmp(ost->forced_keyframes, "source", 6)
1148 && in_picture->key_frame==1) {
1149 forced_keyframe = 1;
1152 if (forced_keyframe) {
1153 in_picture->pict_type = AV_PICTURE_TYPE_I;
1154 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1157 update_benchmark(NULL);
1159 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1160 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1161 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1162 enc->time_base.num, enc->time_base.den);
1165 ost->frames_encoded++;
1167 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1168 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1170 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1176 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1177 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1178 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1179 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* encoders without CAP_DELAY that omit pts: use the sync counter */
1182 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1183 pkt.pts = ost->sync_opts;
1185 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1188 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1189 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1190 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1191 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1194 frame_size = pkt.size;
1195 write_frame(s, &pkt, ost);
1197 /* if two pass, output log */
1198 if (ost->logfile && enc->stats_out) {
1199 fprintf(ost->logfile, "%s", enc->stats_out);
1205 * For video, number of frames in == number of packets out.
1206 * But there may be reordering, so we can't throw away frames on encoder
1207 * flush, we need to limit them here, before they go into encoder.
1209 ost->frame_number++;
1211 if (vstats_filename && frame_size)
1212 do_video_stats(ost, frame_size);
/* buffer the incoming frame so it can be duplicated next call */
1215 if (!ost->last_frame)
1216 ost->last_frame = av_frame_alloc();
1217 av_frame_unref(ost->last_frame);
1218 if (next_picture && ost->last_frame)
1219 av_frame_ref(ost->last_frame, next_picture);
1221 av_frame_free(&ost->last_frame);
1224 static double psnr(double d)
1226 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quantizer, PSNR, sizes,
 * bitrates, picture type) to the -vstats file, opening it on first use. */
1229 static void do_video_stats(OutputStream *ost, int frame_size)
1231 AVCodecContext *enc;
1233 double ti1, bitrate, avg_bitrate;
1235 /* this is executed just the first time do_video_stats is called */
1237 vstats_file = fopen(vstats_filename, "w");
1245 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1246 frame_number = ost->st->nb_frames;
/* quality is stored as lambda; convert back to a QP-style value */
1247 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1248 ost->quality / (float)FF_QP2LAMBDA);
/* normalize the summed squared error by max energy before psnr() */
1250 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1251 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1253 fprintf(vstats_file,"f_size= %6d ", frame_size);
1254 /* compute pts value */
1255 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1259 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1260 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1261 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1262 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1263 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this stream — and, in the elided "shortest" handling, every stream
 * of its output file — as completely finished (encoder and muxer). */
1267 static void finish_output_stream(OutputStream *ost)
1269 OutputFile *of = output_files[ost->file_index];
1272 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1275 for (i = 0; i < of->ctx->nb_streams; i++)
1276 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
/*
 * Drain every output stream's buffersink and hand the frames to the video /
 * audio encoders.  With flush!=0, EOF from a video sink triggers a final
 * do_video_out(NULL) to flush the fps conversion logic.
 * NOTE(review): this excerpt has gaps (declarations of i/ret, several closing
 * braces, continue/break statements); comments cover only the visible lines.
 */
1281  * Get and encode new output from any of the filtergraphs, without causing
1284  * @return 0 for success, <0 for severe errors
1286 static int reap_filters(int flush)
1288     AVFrame *filtered_frame = NULL;
1291     /* Reap all buffers present in the buffer sinks */
1292     for (i = 0; i < nb_output_streams; i++) {
1293         OutputStream *ost = output_streams[i];
1294         OutputFile    *of = output_files[ost->file_index];
1295         AVFilterContext *filter;
1296         AVCodecContext *enc = ost->enc_ctx;
1301         filter = ost->filter->filter;
         /* lazily allocate the reusable frame that receives sink output */
1303         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1304             return AVERROR(ENOMEM);
1306         filtered_frame = ost->filtered_frame;
1309             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
             /* NO_REQUEST: only take frames already queued; do not pull upstream */
1310             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1311                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
1313                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1314                     av_log(NULL, AV_LOG_WARNING,
1315                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1316                 } else if (flush && ret == AVERROR_EOF) {
1317                     if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1318                         do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1322             if (ost->finished) {
1323                 av_frame_unref(filtered_frame);
1326             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1327                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1328                 AVRational tb = enc->time_base;
                 /* widen the time base so float_pts keeps extra fractional precision */
1329                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1331                 tb.den <<= extra_bits;
1333                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1334                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1335                 float_pts /= 1 << extra_bits;
1336                 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1337                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1339                 filtered_frame->pts =
1340                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1341                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1343             //if (ost->source_index >= 0)
1344             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1346             switch (filter->inputs[0]->type) {
1347             case AVMEDIA_TYPE_VIDEO:
                 /* keep the filter-produced SAR unless the user forced an aspect ratio */
1348                 if (!ost->frame_aspect_ratio.num)
1349                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1352                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1353                             av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1355                             enc->time_base.num, enc->time_base.den);
1358                 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1360             case AVMEDIA_TYPE_AUDIO:
                 /* the filtergraph should normalize channel layout/count for us */
1361                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1362                     enc->channels != av_frame_get_channels(filtered_frame)) {
1363                     av_log(NULL, AV_LOG_ERROR,
1364                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1367                 do_audio_out(of->ctx, ost, filtered_frame);
1370                 // TODO support subtitle filters
1374             av_frame_unref(filtered_frame);
/*
 * Print the end-of-run summary: per-media-type byte totals, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet and
 * frame counts.  Warns if nothing at all was encoded.
 * NOTE(review): declarations of i/j and several closing braces are missing
 * from this excerpt.
 */
1381 static void print_final_stats(int64_t total_size)
1383     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1384     uint64_t subtitle_size = 0;
1385     uint64_t data_size = 0;
1386     float percent = -1.0;
     /* accumulate bytes written per media type across all output streams */
1390     for (i = 0; i < nb_output_streams; i++) {
1391         OutputStream *ost = output_streams[i];
1392         switch (ost->enc_ctx->codec_type) {
1393             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1394             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1395             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1396             default:                 other_size += ost->data_size; break;
1398         extra_size += ost->enc_ctx->extradata_size;
1399         data_size  += ost->data_size;
         /* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with the deprecated
          * CODEC_FLAG_PASS2 macro — should probably be AV_CODEC_FLAG_PASS2;
          * the values are expected to match, but worth confirming. */
1400         if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1401                != AV_CODEC_FLAG_PASS1)
     /* overhead = container bytes beyond the raw encoded payload */
1405     if (data_size && total_size>0 && total_size >= data_size)
1406         percent = 100.0 * (total_size - data_size) / data_size;
1408     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1409            video_size / 1024.0,
1410            audio_size / 1024.0,
1411            subtitle_size / 1024.0,
1412            other_size / 1024.0,
1413            extra_size / 1024.0);
1415         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1417         av_log(NULL, AV_LOG_INFO, "unknown");
1418     av_log(NULL, AV_LOG_INFO, "\n");
1420     /* print verbose per-stream stats */
1421     for (i = 0; i < nb_input_files; i++) {
1422         InputFile *f = input_files[i];
1423         uint64_t total_packets = 0, total_size = 0;
1425         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1426                i, f->ctx->filename);
1428         for (j = 0; j < f->nb_streams; j++) {
1429             InputStream *ist = input_streams[f->ist_index + j];
1430             enum AVMediaType type = ist->dec_ctx->codec_type;
1432             total_size    += ist->data_size;
1433             total_packets += ist->nb_packets;
1435             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1436                    i, j, media_type_string(type));
1437             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1438                    ist->nb_packets, ist->data_size);
1440             if (ist->decoding_needed) {
1441                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1442                        ist->frames_decoded);
1443                 if (type == AVMEDIA_TYPE_AUDIO)
1444                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1445                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1448             av_log(NULL, AV_LOG_VERBOSE, "\n");
1451         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1452                total_packets, total_size);
     /* mirror of the input loop above, for the muxed side */
1455     for (i = 0; i < nb_output_files; i++) {
1456         OutputFile *of = output_files[i];
1457         uint64_t total_packets = 0, total_size = 0;
1459         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1460                i, of->ctx->filename);
1462         for (j = 0; j < of->ctx->nb_streams; j++) {
1463             OutputStream *ost = output_streams[of->ost_index + j];
1464             enum AVMediaType type = ost->enc_ctx->codec_type;
1466             total_size    += ost->data_size;
1467             total_packets += ost->packets_written;
1469             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1470                    i, j, media_type_string(type));
1471             if (ost->encoding_needed) {
1472                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1473                        ost->frames_encoded);
1474                 if (type == AVMEDIA_TYPE_AUDIO)
1475                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1476                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1479             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1480                    ost->packets_written, ost->data_size);
1482             av_log(NULL, AV_LOG_VERBOSE, "\n");
1485         av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1486                total_packets, total_size);
1488     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1489         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1491             av_log(NULL, AV_LOG_WARNING, "\n");
1493             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/*
 * Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when -progress is active, a machine-readable key=value report
 * to progress_avio.  Rate-limited to one update per 500ms unless this is the
 * final report, in which case print_final_stats() is also invoked.
 * NOTE(review): this excerpt has gaps (buf declaration, several early
 * returns, qp handling, brace lines); comments cover only the visible code.
 */
1498 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1501     AVBPrint buf_script;
1503     AVFormatContext *oc;
1505     AVCodecContext *enc;
1506     int frame_number, vid, i;
1509     int64_t pts = INT64_MIN + 1;
     /* static: state persists across calls for rate limiting / qp histogram */
1510     static int64_t last_time = -1;
1511     static int qp_histogram[52];
1512     int hours, mins, secs, us;
1516     if (!print_stats && !is_last_report && !progress_avio)
     /* throttle intermediate reports to at most one per 500ms */
1519     if (!is_last_report) {
1520         if (last_time == -1) {
1521             last_time = cur_time;
1524         if ((cur_time - last_time) < 500000)
1526         last_time = cur_time;
1529     t = (cur_time-timer_start) / 1000000.0;
1532     oc = output_files[0]->ctx;
1534     total_size = avio_size(oc->pb);
1535     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1536         total_size = avio_tell(oc->pb);
1540     av_bprint_init(&buf_script, 0, 1);
1541     for (i = 0; i < nb_output_streams; i++) {
1543         ost = output_streams[i];
1545         if (!ost->stream_copy)
1546             q = ost->quality / (float) FF_QP2LAMBDA;
         /* 'vid' tracks whether a video stream was already reported */
1548         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1549             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1550             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1551                        ost->file_index, ost->index, q);
1553         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1556             frame_number = ost->frame_number;
1557             fps = t > 1 ? frame_number / t : 0;
1558             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1559                      frame_number, fps < 9.95, fps, q);
1560             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1561             av_bprintf(&buf_script, "fps=%.1f\n", fps);
1562             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1563                        ost->file_index, ost->index, q);
1565                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1569                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                 /* one hex digit per qp bucket, log2-compressed counts */
1571                 for (j = 0; j < 32; j++)
1572                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1575             if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1577                 double error, error_sum = 0;
1578                 double scale, scale_sum = 0;
1580                 char type[3] = { 'Y','U','V' };
1581                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1582                 for (j = 0; j < 3; j++) {
                     /* last report: cumulative error over all frames; else this frame */
1583                     if (is_last_report) {
1584                         error = enc->error[j];
1585                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1587                         error = ost->error[j];
1588                         scale = enc->width * enc->height * 255.0 * 255.0;
1594                     p = psnr(error / scale);
1595                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1596                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1597                                ost->file_index, ost->index, type[j] | 32, p);
1599                 p = psnr(error_sum / scale_sum);
1600                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1601                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1602                            ost->file_index, ost->index, p);
1606         /* compute min output value */
1607         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1608             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1609                                           ost->st->time_base, AV_TIME_BASE_Q));
1611             nb_frames_drop += ost->last_dropped;
     /* split pts into h:m:s.us for display */
1614     secs = FFABS(pts) / AV_TIME_BASE;
1615     us = FFABS(pts) % AV_TIME_BASE;
1621     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1622     speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1624     if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1626     else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1627                                  "size=%8.0fkB time=", total_size / 1024.0);
1629         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1630     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1631              "%02d:%02d:%02d.%02d ", hours, mins, secs,
1632              (100 * us) / AV_TIME_BASE);
1635         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1636         av_bprintf(&buf_script, "bitrate=N/A\n");
1638         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1639         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1642     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1643     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1644     av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1645     av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1646                hours, mins, secs, us);
1648     if (nb_frames_dup || nb_frames_drop)
1649         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1650                 nb_frames_dup, nb_frames_drop);
1651     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1652     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1655         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1656         av_bprintf(&buf_script, "speed=N/A\n");
1658         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1659         av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
     /* '\r' keeps updating the same console line; '\n' only for the last report */
1662     if (print_stats || is_last_report) {
1663         const char end = is_last_report ? '\n' : '\r';
1664         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1665             fprintf(stderr, "%s    %c", buf, end);
1667             av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
1672     if (progress_avio) {
1673         av_bprintf(&buf_script, "progress=%s\n",
1674                    is_last_report ? "end" : "continue");
1675         avio_write(progress_avio, buf_script.str,
1676                    FFMIN(buf_script.len, buf_script.size - 1));
1677         avio_flush(progress_avio);
1678         av_bprint_finalize(&buf_script, NULL);
1679         if (is_last_report) {
1680             if ((ret = avio_closep(&progress_avio)) < 0)
1681                 av_log(NULL, AV_LOG_ERROR,
1682                        "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1687         print_final_stats(total_size);
/*
 * Drain the delayed frames buffered inside every active encoder by feeding
 * NULL frames until no more packets come out, writing each flushed packet to
 * the muxer.  Skips streamcopy streams, raw/unbuffered audio encoders and
 * (behind FF_API_LAVF_FMT_RAWPICTURE) rawvideo passthrough.
 * NOTE(review): this excerpt is missing lines (pkt/got_packet declarations,
 * the 'desc' assignment, the stop_encoding loop structure, exit paths).
 */
1690 static void flush_encoders(void)
1694     for (i = 0; i < nb_output_streams; i++) {
1695         OutputStream   *ost = output_streams[i];
1696         AVCodecContext *enc = ost->enc_ctx;
1697         AVFormatContext *os = output_files[ost->file_index]->ctx;
1698         int stop_encoding = 0;
1700         if (!ost->encoding_needed)
         /* PCM-style codecs (frame_size <= 1) have no buffered frames to flush */
1703         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1705 #if FF_API_LAVF_FMT_RAWPICTURE
1706         if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1711             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1714             switch (enc->codec_type) {
1715             case AVMEDIA_TYPE_AUDIO:
1716                 encode = avcodec_encode_audio2;
1719             case AVMEDIA_TYPE_VIDEO:
1720                 encode = avcodec_encode_video2;
1731                 av_init_packet(&pkt);
1735                 update_benchmark(NULL);
                 /* NULL frame signals the encoder to emit buffered output */
1736                 ret = encode(enc, &pkt, NULL, &got_packet);
1737                 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1739                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1744                 if (ost->logfile && enc->stats_out) {
1745                     fprintf(ost->logfile, "%s", enc->stats_out);
1751                 if (ost->finished & MUXER_FINISHED) {
1752                     av_packet_unref(&pkt);
                 /* convert from encoder to stream time base before muxing */
1755                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1756                 pkt_size = pkt.size;
1757                 write_frame(os, &pkt, ost);
1758                 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1759                     do_video_stats(ost, pkt_size);
/*
 * Decide whether a packet from this input stream may be written to this
 * output stream right now (matching source, muxer not finished, past the
 * output file's start time).
 * NOTE(review): the actual return statements are missing from this excerpt;
 * presumably the visible conditions return 0 and the fall-through returns 1
 * — confirm against the full source.
 */
1770  * Check whether a packet from ist should be written into ost at this time
1772 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1774     OutputFile *of = output_files[ost->file_index];
1775     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1777     if (ost->source_index != ist_index)
1783     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding: rescale
 * pts/dts/duration into the output stream time base, honour -ss/-t limits,
 * optionally run the parser-change fixup, and mux the result.
 * NOTE(review): several lines are missing from this excerpt (opkt/pict
 * declarations, early returns, the parser-change condition tail); comments
 * cover only the visible code.
 */
1789 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1791     OutputFile *of = output_files[ost->file_index];
1792     InputFile   *f = input_files [ist->file_index];
1793     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1794     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1798     av_init_packet(&opkt);
     /* the first muxed packet must be a keyframe unless explicitly allowed */
1800     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1801         !ost->copy_initial_nonkeyframes)
1804     if (!ost->frame_number && !ost->copy_prior_start) {
1805         int64_t comp_start = start_time;
1806         if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1807             comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1808         if (pkt->pts == AV_NOPTS_VALUE ?
1809             ist->pts < comp_start :
1810             pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
     /* stop the stream once the -t recording window is exceeded */
1814     if (of->recording_time != INT64_MAX &&
1815         ist->pts >= of->recording_time + start_time) {
1816         close_output_stream(ost);
1820     if (f->recording_time != INT64_MAX) {
1821         start_time = f->ctx->start_time;
1822         if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1823             start_time += f->start_time;
1824         if (ist->pts >= f->recording_time + start_time) {
1825             close_output_stream(ost);
1830     /* force the input stream PTS */
1831     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1834     if (pkt->pts != AV_NOPTS_VALUE)
1835         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1837         opkt.pts = AV_NOPTS_VALUE;
1839     if (pkt->dts == AV_NOPTS_VALUE)
1840         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1842         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1843     opkt.dts -= ost_tb_start_time;
     /* NOTE(review): ost->st->codec is the deprecated AVStream codec context;
      * the rest of this file prefers ost->enc_ctx */
1845     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1846         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1848             duration = ist->dec_ctx->frame_size;
         /* rescale against the sample clock to avoid drift on audio */
1849         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1850                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1851                                                ost->st->time_base) - ost_tb_start_time;
1854     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1855     opkt.flags    = pkt->flags;
1856     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1857     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1858        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1859        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1860        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1862         int ret = av_parser_change(ost->parser, ost->st->codec,
1863                                &opkt.data, &opkt.size,
1864                                pkt->data, pkt->size,
1865                                pkt->flags & AV_PKT_FLAG_KEY);
1867             av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
         /* wrap parser-produced data so the packet owns its buffer */
1872             opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1877         opkt.data = pkt->data;
1878         opkt.size = pkt->size;
1880     av_copy_packet_side_data(&opkt, pkt);
1882 #if FF_API_LAVF_FMT_RAWPICTURE
1883     if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1884         ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1885         (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1886         /* store AVPicture in AVPacket, as expected by the output format */
1887         int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1889             av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1893         opkt.data = (uint8_t *)&pict;
1894         opkt.size = sizeof(AVPicture);
1895         opkt.flags |= AV_PKT_FLAG_KEY;
1899     write_frame(of->ctx, &opkt, ost);
/*
 * If the decoder did not report a channel layout, pick the default layout
 * for its channel count (bounded by -guess_layout_max) and log a warning.
 * NOTE(review): the return statements are missing from this excerpt;
 * presumably returns non-zero on success and 0 when no layout could be
 * guessed — confirm against the full source.
 */
1902 int guess_input_channel_layout(InputStream *ist)
1904     AVCodecContext *dec = ist->dec_ctx;
1906     if (!dec->channel_layout) {
1907         char layout_name[256];
         /* respect the user-imposed cap on layout guessing */
1909         if (dec->channels > ist->guess_layout_max)
1911         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1912         if (!dec->channel_layout)
1914         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1915                                      dec->channels, dec->channel_layout);
1916         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1917                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Book-keeping after a decode call: count successes/failures in
 * decode_error_stat[] and, with -xerror, abort on decode errors or on
 * frames the decoder flagged as corrupt.
 * NOTE(review): the exit/abort calls themselves are missing from this
 * excerpt.
 */
1922 static void check_decode_result(InputStream *ist, int *got_output, int ret)
     /* index 0 counts successful decodes, index 1 counts failures */
1924     if (*got_output || ret<0)
1925         decode_error_stat[ret<0] ++;
1927     if (ret < 0 && exit_on_error)
1930     if (exit_on_error && *got_output && ist) {
1931         if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1932             av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/*
 * Decode one audio packet, maintain the input stream clock, reconfigure the
 * filtergraphs if sample format/rate/channels changed mid-stream, pick the
 * best available pts source for the decoded frame, and push it into every
 * attached filter input.
 * NOTE(review): this excerpt has gaps (several closing braces, exit paths,
 * the last-filter branch of the fan-out loop); comments cover only the
 * visible code.
 */
1938 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1940     AVFrame *decoded_frame, *f;
1941     AVCodecContext *avctx = ist->dec_ctx;
1942     int i, ret, err = 0, resample_changed;
1943     AVRational decoded_frame_tb;
     /* lazily allocate the reusable decode / filter-ref frames */
1945     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1946         return AVERROR(ENOMEM);
1947     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1948         return AVERROR(ENOMEM);
1949     decoded_frame = ist->decoded_frame;
1951     update_benchmark(NULL);
1952     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1953     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1955     if (ret >= 0 && avctx->sample_rate <= 0) {
1956         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1957         ret = AVERROR_INVALIDDATA;
1960     check_decode_result(ist, got_output, ret);
1962     if (!*got_output || ret < 0)
1965     ist->samples_decoded += decoded_frame->nb_samples;
1966     ist->frames_decoded++;
1969     /* increment next_dts to use for the case where the input stream does not
1970        have timestamps or there are multiple frames in the packet */
1971     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1973     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
     /* detect mid-stream parameter changes that require filter reinit */
1977     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
1978                        ist->resample_channels       != avctx->channels               ||
1979                        ist->resample_channel_layout != decoded_frame->channel_layout ||
1980                        ist->resample_sample_rate    != decoded_frame->sample_rate;
1981     if (resample_changed) {
1982         char layout1[64], layout2[64];
1984             if (!guess_input_channel_layout(ist)) {
1985                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986                        "layout for Input Stream #%d.%d\n", ist->file_index,
1990             decoded_frame->channel_layout = avctx->channel_layout;
1992         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993                                      ist->resample_channel_layout);
1994         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995                                      decoded_frame->channel_layout);
1997         av_log(NULL, AV_LOG_INFO,
1998                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999                ist->file_index, ist->st->index,
2000                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
2001                ist->resample_channels, layout1,
2002                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003                avctx->channels, layout2);
         /* remember the new parameters as the current baseline */
2005         ist->resample_sample_fmt     = decoded_frame->format;
2006         ist->resample_sample_rate    = decoded_frame->sample_rate;
2007         ist->resample_channel_layout = decoded_frame->channel_layout;
2008         ist->resample_channels       = avctx->channels;
2010         for (i = 0; i < nb_filtergraphs; i++)
2011             if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012                 FilterGraph *fg = filtergraphs[i];
2013                 if (configure_filtergraph(fg) < 0) {
2014                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2020     /* if the decoder provides a pts, use it instead of the last packet pts.
2021        the decoder could be delaying output by a packet or more. */
2022     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023         ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024         decoded_frame_tb   = avctx->time_base;
2025     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026         decoded_frame->pts = decoded_frame->pkt_pts;
2027         decoded_frame_tb   = ist->st->time_base;
2028     } else if (pkt->pts != AV_NOPTS_VALUE) {
2029         decoded_frame->pts = pkt->pts;
2030         decoded_frame_tb   = ist->st->time_base;
2032         decoded_frame->pts = ist->dts;
2033         decoded_frame_tb   = AV_TIME_BASE_Q;
     /* consume the packet pts so multi-frame packets don't reuse it */
2035     pkt->pts           = AV_NOPTS_VALUE;
2036     if (decoded_frame->pts != AV_NOPTS_VALUE)
2037         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039                                               (AVRational){1, avctx->sample_rate});
2040     ist->nb_samples = decoded_frame->nb_samples;
     /* fan the frame out to all filter inputs; ref for all but the last */
2041     for (i = 0; i < ist->nb_filters; i++) {
2042         if (i < ist->nb_filters - 1) {
2043             f = ist->filter_frame;
2044             err = av_frame_ref(f, decoded_frame);
2049         err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2050                                            AV_BUFFERSRC_FLAG_PUSH);
2051         if (err == AVERROR_EOF)
2052             err = 0; /* ignore */
2056     decoded_frame->pts = AV_NOPTS_VALUE;
2058     av_frame_unref(ist->filter_frame);
2059     av_frame_unref(decoded_frame);
2060     return err < 0 ? err : ret;
/*
 * Decode one video packet, fix up has_b_frames, retrieve hwaccel frames,
 * derive the stream clock from the best-effort timestamp, reconfigure
 * filtergraphs on resolution/pixel-format changes, and push the frame into
 * every attached filter input.
 * NOTE(review): this excerpt has gaps (goto fail paths, closing braces, the
 * last-filter branch of the fan-out loop); comments cover only the visible
 * code.
 */
2063 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2065     AVFrame *decoded_frame, *f;
2066     int i, ret = 0, err = 0, resample_changed;
2067     int64_t best_effort_timestamp;
2068     AVRational *frame_sample_aspect;
2070     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2071         return AVERROR(ENOMEM);
2072     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2073         return AVERROR(ENOMEM);
2074     decoded_frame = ist->decoded_frame;
     /* feed the current stream clock to the decoder as the packet dts */
2075     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2077     update_benchmark(NULL);
2078     ret = avcodec_decode_video2(ist->dec_ctx,
2079                                 decoded_frame, got_output, pkt);
2080     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2082     // The following line may be required in some cases where there is no parser
2083     // or the parser does not has_b_frames correctly
2084     if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2085         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2086             ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2088             av_log(ist->dec_ctx, AV_LOG_WARNING,
2089                    "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2090                    "If you want to help, upload a sample "
2091                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2092                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2093                    ist->dec_ctx->has_b_frames,
2094                    ist->st->codec->has_b_frames);
2097     check_decode_result(ist, got_output, ret);
     /* diagnose decoder-context vs frame parameter mismatches (debug only) */
2099     if (*got_output && ret >= 0) {
2100         if (ist->dec_ctx->width  != decoded_frame->width ||
2101             ist->dec_ctx->height != decoded_frame->height ||
2102             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2103             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2104                    decoded_frame->width,
2105                    decoded_frame->height,
2106                    decoded_frame->format,
2107                    ist->dec_ctx->width,
2108                    ist->dec_ctx->height,
2109                    ist->dec_ctx->pix_fmt);
2113     if (!*got_output || ret < 0)
2116     if(ist->top_field_first>=0)
2117         decoded_frame->top_field_first = ist->top_field_first;
2119     ist->frames_decoded++;
     /* download the frame from the hw accelerator into system memory */
2121     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2122         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2126     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2128     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2129     if(best_effort_timestamp != AV_NOPTS_VALUE) {
2130         int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2132         if (ts != AV_NOPTS_VALUE)
2133             ist->next_pts = ist->pts = ts;
2137         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2138                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2139                ist->st->index, av_ts2str(decoded_frame->pts),
2140                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2141                best_effort_timestamp,
2142                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2143                decoded_frame->key_frame, decoded_frame->pict_type,
2144                ist->st->time_base.num, ist->st->time_base.den);
2149     if (ist->st->sample_aspect_ratio.num)
2150         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
     /* detect mid-stream size / pixel format changes needing filter reinit */
2152     resample_changed = ist->resample_width   != decoded_frame->width  ||
2153                        ist->resample_height  != decoded_frame->height ||
2154                        ist->resample_pix_fmt != decoded_frame->format;
2155     if (resample_changed) {
2156         av_log(NULL, AV_LOG_INFO,
2157                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2158                ist->file_index, ist->st->index,
2159                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2160                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2162         ist->resample_width   = decoded_frame->width;
2163         ist->resample_height  = decoded_frame->height;
2164         ist->resample_pix_fmt = decoded_frame->format;
2166         for (i = 0; i < nb_filtergraphs; i++) {
2167             if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2168                 configure_filtergraph(filtergraphs[i]) < 0) {
2169                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2175     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
     /* fan the frame out to all filter inputs; ref for all but the last */
2176     for (i = 0; i < ist->nb_filters; i++) {
2177         if (!frame_sample_aspect->num)
2178             *frame_sample_aspect = ist->st->sample_aspect_ratio;
2180         if (i < ist->nb_filters - 1) {
2181             f = ist->filter_frame;
2182             err = av_frame_ref(f, decoded_frame);
2187         ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2188         if (ret == AVERROR_EOF) {
2189             ret = 0; /* ignore */
2190         } else if (ret < 0) {
2191             av_log(NULL, AV_LOG_FATAL,
2192                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2198     av_frame_unref(ist->filter_frame);
2199     av_frame_unref(decoded_frame);
2200     return err < 0 ? err : ret;
/*
 * Decode one subtitle packet; with -fix_sub_duration, delay output by one
 * subtitle so the previous one's display duration can be clipped against the
 * next pts.  Feeds sub2video and every subtitle encoder that accepts this
 * input stream.
 * NOTE(review): lines are missing from this excerpt (end declaration,
 * return statements, some braces); comments cover only the visible code.
 */
2203 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2205     AVSubtitle subtitle;
2206     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2207                                           &subtitle, got_output, pkt);
2209     check_decode_result(NULL, got_output, ret);
2211     if (ret < 0 || !*got_output) {
2213             sub2video_flush(ist);
2217     if (ist->fix_sub_duration) {
2219         if (ist->prev_sub.got_output) {
             /* clip the previous subtitle so it ends when this one starts */
2220             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2221                              1000, AV_TIME_BASE);
2222             if (end < ist->prev_sub.subtitle.end_display_time) {
2223                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2224                        "Subtitle duration reduced from %d to %d%s\n",
2225                        ist->prev_sub.subtitle.end_display_time, end,
2226                        end <= 0 ? ", dropping it" : "");
2227                 ist->prev_sub.subtitle.end_display_time = end;
         /* emit the buffered previous subtitle, buffer the current one */
2230         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2231         FFSWAP(int,        ret,         ist->prev_sub.ret);
2232         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2240     sub2video_update(ist, &subtitle);
2242     if (!subtitle.num_rects)
2245     ist->frames_decoded++;
2247     for (i = 0; i < nb_output_streams; i++) {
2248         OutputStream *ost = output_streams[i];
2250         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2251             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2254         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2258     avsubtitle_free(&subtitle);
/*
 * Signal end-of-stream to every filtergraph input fed by this input stream
 * by pushing a NULL frame into each buffersrc.
 * NOTE(review): the declarations of i/ret and the return statement are
 * missing from this excerpt.
 */
2262 static int send_filter_eof(InputStream *ist)
2265     for (i = 0; i < ist->nb_filters; i++) {
         /* a NULL frame marks EOF on a buffersrc */
2266         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2273 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one demuxed packet for input stream ist, or flush the stream when
 * pkt is NULL (end of input).  When decoding is needed the packet is fed to
 * the appropriate decoder; otherwise stream-copy timestamp bookkeeping is
 * advanced and the packet is forwarded to every matching output stream via
 * do_streamcopy().  no_eof, when set, suppresses sending an EOF to the filter
 * inputs while flushing (used when looping the input).
 * NOTE(review): this view of the file is elided — some original lines between
 * the statements below are not shown. */
2274 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/* Seed the initial timestamp guesses from the first packet seen. */
2280 if (!ist->saw_first_ts) {
2281 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2283 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2284 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2285 ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2287 ist->saw_first_ts = 1;
2290 if (ist->next_dts == AV_NOPTS_VALUE)
2291 ist->next_dts = ist->dts;
2292 if (ist->next_pts == AV_NOPTS_VALUE)
2293 ist->next_pts = ist->pts;
2297 av_init_packet(&avpkt);
/* A valid DTS on the packet overrides the predicted timestamps
 * (all internal timestamps are kept in AV_TIME_BASE_Q units). */
2305 if (pkt->dts != AV_NOPTS_VALUE) {
2306 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2307 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2308 ist->next_pts = ist->pts = ist->dts;
2311 // while we have more to decode or while the decoder did output something on EOF
2312 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2316 ist->pts = ist->next_pts;
2317 ist->dts = ist->next_dts;
/* Decoders without AV_CODEC_CAP_SUBFRAMES are expected to consume the
 * whole packet; a partial consume suggests multiple frames per packet. */
2319 if (avpkt.size && avpkt.size != pkt->size &&
2320 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2321 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2322 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2323 ist->showed_multi_packet_warning = 1;
/* Dispatch to the per-media-type decode helper. */
2326 switch (ist->dec_ctx->codec_type) {
2327 case AVMEDIA_TYPE_AUDIO:
2328 ret = decode_audio (ist, &avpkt, &got_output);
2330 case AVMEDIA_TYPE_VIDEO:
2331 ret = decode_video (ist, &avpkt, &got_output);
/* Derive the frame duration: prefer the packet's own duration,
 * else compute it from the declared codec framerate. */
2332 if (avpkt.duration) {
2333 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2334 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2335 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2336 duration = ((int64_t)AV_TIME_BASE *
2337 ist->dec_ctx->framerate.den * ticks) /
2338 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2342 if(ist->dts != AV_NOPTS_VALUE && duration) {
2343 ist->next_dts += duration;
2345 ist->next_dts = AV_NOPTS_VALUE;
2348 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2350 case AVMEDIA_TYPE_SUBTITLE:
2351 ret = transcode_subtitles(ist, &avpkt, &got_output);
2358 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2359 ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first slice of the packet carries the original PTS. */
2366 avpkt.pts= AV_NOPTS_VALUE;
2368 // touch data and size only if not EOF
2370 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2378 if (got_output && !pkt)
2382 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2383 /* except when looping we need to flush but not to send an EOF */
2384 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2385 int ret = send_filter_eof(ist);
2387 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2392 /* handle stream copy */
2393 if (!ist->decoding_needed) {
2394 ist->dts = ist->next_dts;
2395 switch (ist->dec_ctx->codec_type) {
2396 case AVMEDIA_TYPE_AUDIO:
2397 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2398 ist->dec_ctx->sample_rate;
2400 case AVMEDIA_TYPE_VIDEO:
2401 if (ist->framerate.num) {
2402 // TODO: Remove work-around for c99-to-c89 issue 7
2403 AVRational time_base_q = AV_TIME_BASE_Q;
2404 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2405 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2406 } else if (pkt->duration) {
2407 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2408 } else if(ist->dec_ctx->framerate.num != 0) {
2409 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2410 ist->next_dts += ((int64_t)AV_TIME_BASE *
2411 ist->dec_ctx->framerate.den * ticks) /
2412 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2416 ist->pts = ist->dts;
2417 ist->next_pts = ist->next_dts;
/* Forward the packet to every output stream that stream-copies from ist. */
2419 for (i = 0; pkt && i < nb_output_streams; i++) {
2420 OutputStream *ost = output_streams[i];
2422 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2425 do_streamcopy(ist, ost, pkt);
/* Create an SDP description covering all RTP output files and either print it
 * to stdout or, when -sdp_file was given, write it to that file.
 * NOTE(review): elided view — allocation-failure and loop-exit lines of the
 * original are not shown here. */
2431 static void print_sdp(void)
2436 AVIOContext *sdp_pb;
2437 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the muxers that are actually RTP; j counts them. */
2441 for (i = 0, j = 0; i < nb_output_files; i++) {
2442 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2443 avc[j] = output_files[i]->ctx;
2451 av_sdp_create(avc, j, sdp, sizeof(sdp));
2453 if (!sdp_filename) {
2454 printf("SDP:\n%s\n", sdp);
2457 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2458 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2460 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2461 avio_closep(&sdp_pb);
/* Free the filename so the SDP is only written once. */
2462 av_freep(&sdp_filename);
/* Look up the hwaccel table entry matching pix_fmt; iteration stops at the
 * NULL-name sentinel.  NOTE(review): the fall-through "not found" return of
 * the original is elided from this view. */
2470 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2473 for (i = 0; hwaccels[i].name; i++)
2474 if (hwaccels[i].pix_fmt == pix_fmt)
2475 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format that matches the user's hwaccel
 * selection and initializes successfully; records the chosen hwaccel on the
 * InputStream.  Non-hwaccel formats are skipped here (the terminating
 * software-format fallback of the original is elided from this view). */
2479 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2481 InputStream *ist = s->opaque;
2482 const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2485 for (p = pix_fmts; *p != -1; p++) {
2486 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2487 const HWAccel *hwaccel;
2489 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
/* Skip hwaccels that conflict with an already-active or explicitly
 * requested one. */
2492 hwaccel = get_hwaccel(*p);
2494 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2495 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2498 ret = hwaccel->init(s);
/* Init failure is fatal only when this hwaccel was explicitly requested. */
2500 if (ist->hwaccel_id == hwaccel->id) {
2501 av_log(NULL, AV_LOG_FATAL,
2502 "%s hwaccel requested for input stream #%d:%d, "
2503 "but cannot be initialized.\n", hwaccel->name,
2504 ist->file_index, ist->st->index);
2505 return AV_PIX_FMT_NONE;
2509 ist->active_hwaccel_id = hwaccel->id;
2510 ist->hwaccel_pix_fmt = *p;
2517 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2519 InputStream *ist = s->opaque;
2521 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2522 return ist->hwaccel_get_buffer(s, frame, flags);
2524 return avcodec_default_get_buffer2(s, frame, flags);
/* Prepare input stream ist_index for use: when decoding is needed, install
 * the get_format/get_buffer2 callbacks, apply per-codec decoder option
 * defaults (DVB subtitle EDT, ASS text subtitles, auto threads) and open the
 * decoder.  Resets the stream's timestamp predictors.  On failure a message
 * is written into error (error_len bytes) and a negative AVERROR returned. */
2527 static int init_input_stream(int ist_index, char *error, int error_len)
2530 InputStream *ist = input_streams[ist_index];
2532 if (ist->decoding_needed) {
2533 AVCodec *codec = ist->dec;
2535 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2536 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2537 return AVERROR(EINVAL);
/* Hook our callbacks into the decoder context; opaque carries ist so the
 * callbacks can reach per-stream state. */
2540 ist->dec_ctx->opaque = ist;
2541 ist->dec_ctx->get_format = get_format;
2542 ist->dec_ctx->get_buffer2 = get_buffer;
2543 ist->dec_ctx->thread_safe_callbacks = 1;
2545 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2546 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2547 (ist->decoding_needed & DECODING_FOR_OST)) {
2548 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2549 if (ist->decoding_needed & DECODING_FOR_FILTER)
2550 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2553 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
/* Default to automatic thread count unless the user set one. */
2555 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2556 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2557 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2558 if (ret == AVERROR_EXPERIMENTAL)
2559 abort_codec_experimental(codec, 0);
2561 snprintf(error, error_len,
2562 "Error while opening decoder for input stream "
2564 ist->file_index, ist->st->index, av_err2str(ret));
/* Any options left unconsumed by the decoder are reported as errors. */
2567 assert_avoptions(ist->decoder_opts);
2570 ist->next_pts = AV_NOPTS_VALUE;
2571 ist->next_dts = AV_NOPTS_VALUE;
2576 static InputStream *get_input_stream(OutputStream *ost)
2578 if (ost->source_index >= 0)
2579 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values.  Returns -1, 0 or +1 for a < b,
 * a == b, a > b; the two-comparison form never overflows, unlike a - b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
/* Open the encoder for output stream ost when encoding is needed: copy the
 * decoder's subtitle header, apply default encoder options (threads, audio
 * bitrate), propagate the filtergraph's hw frames context, open the codec,
 * then mirror the coded side data and time base onto the muxer stream.  For
 * stream copy, only the codec context options and time base are applied.
 * On failure a message is written into error and a negative value returned.
 * NOTE(review): elided view — several error-path and loop-exit lines of the
 * original are not shown. */
2588 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2592 if (ost->encoding_needed) {
2593 AVCodec *codec = ost->enc;
2594 AVCodecContext *dec = NULL;
2597 if ((ist = get_input_stream(ost)))
2599 if (dec && dec->subtitle_header) {
2600 /* ASS code assumes this buffer is null terminated so add extra byte. */
2601 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2602 if (!ost->enc_ctx->subtitle_header)
2603 return AVERROR(ENOMEM);
2604 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2605 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2607 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2608 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate of 128 kb/s when none of b/ab is given. */
2609 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2611 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2612 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2613 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Pass the filtergraph's hardware frames context into the encoder. */
2615 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2616 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2617 if (!ost->enc_ctx->hw_frames_ctx)
2618 return AVERROR(ENOMEM);
2621 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2622 if (ret == AVERROR_EXPERIMENTAL)
2623 abort_codec_experimental(codec, 1);
2624 snprintf(error, error_len,
2625 "Error while opening encoder for output stream #%d:%d - "
2626 "maybe incorrect parameters such as bit_rate, rate, width or height",
2627 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders dictate the buffersink frame size. */
2630 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2631 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2632 av_buffersink_set_frame_size(ost->filter->filter,
2633 ost->enc_ctx->frame_size);
2634 assert_avoptions(ost->encoder_opts);
2635 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2636 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2637 " It takes bits/s as argument, not kbits/s\n");
2639 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2641 av_log(NULL, AV_LOG_FATAL,
2642 "Error initializing the output stream codec context.\n");
/* Mirror the encoder's coded side data onto the muxer stream. */
2646 if (ost->enc_ctx->nb_coded_side_data) {
2649 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2650 sizeof(*ost->st->side_data));
2651 if (!ost->st->side_data)
2652 return AVERROR(ENOMEM);
2654 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2655 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2656 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2658 sd_dst->data = av_malloc(sd_src->size);
2660 return AVERROR(ENOMEM);
2661 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2662 sd_dst->size = sd_src->size;
2663 sd_dst->type = sd_src->type;
2664 ost->st->nb_side_data++;
2668 // copy timebase while removing common factors
2669 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2670 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path: apply user options to the codec context directly. */
2672 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2674 av_log(NULL, AV_LOG_FATAL,
2675 "Error setting up codec context options.\n");
2678 // copy timebase while removing common factors
2679 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse a -force_key_frames specification kf (comma-separated timestamps,
 * where an entry may be "chapters[+offset]" to expand to all chapter start
 * times) into a sorted array of timestamps expressed in avctx->time_base,
 * stored in ost->forced_kf_pts / ost->forced_kf_count.  Exits fatally on
 * allocation failure. */
2685 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2686 AVCodecContext *avctx)
2689 int n = 1, i, size, index = 0;
/* First pass: count comma-separated entries to size the array. */
2692 for (p = kf; *p; p++)
2696 pts = av_malloc_array(size, sizeof(*pts));
2698 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2703 for (i = 0; i < n; i++) {
2704 char *next = strchr(p, ',');
/* "chapters[+offset]": expand to one keyframe per chapter start. */
2709 if (!memcmp(p, "chapters", 8)) {
2711 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array to hold one entry per chapter (overflow-checked). */
2714 if (avf->nb_chapters > INT_MAX - size ||
2715 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2717 av_log(NULL, AV_LOG_FATAL,
2718 "Could not allocate forced key frames array.\n");
2721 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2722 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2724 for (j = 0; j < avf->nb_chapters; j++) {
2725 AVChapter *c = avf->chapters[j];
2726 av_assert1(index < size);
2727 pts[index++] = av_rescale_q(c->start, c->time_base,
2728 avctx->time_base) + t;
/* Plain timestamp entry. */
2733 t = parse_time_or_die("force_key_frames", p, 1);
2734 av_assert1(index < size);
2735 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Keep the list sorted so the encoder can scan it linearly. */
2742 av_assert0(index == size);
2743 qsort(pts, size, sizeof(*pts), compare_int64);
2744 ost->forced_kf_count = size;
2745 ost->forced_kf_pts = pts;
/* Warn, once per stream, when a packet references an input stream that
 * appeared after the header was parsed; nb_streams_warn tracks the highest
 * stream index already reported so the warning is not repeated. */
2748 static void report_new_stream(int input_index, AVPacket *pkt)
2750 InputFile *file = input_files[input_index];
2751 AVStream *st = file->ctx->streams[pkt->stream_index];
2753 if (pkt->stream_index < file->nb_streams_warn)
2755 av_log(file->ctx, AV_LOG_WARNING,
2756 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2757 av_get_media_type_string(st->codec->codec_type),
2758 input_index, pkt->stream_index,
2759 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2760 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on ost's stream: the full LIBAVCODEC_IDENT
 * plus encoder name normally, or just "Lavc <name>" when bitexact output is
 * requested via the muxer fflags or the codec flags.  A user-provided
 * "encoder" tag is left untouched. */
2763 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2765 AVDictionaryEntry *e;
2767 uint8_t *encoder_string;
2768 int encoder_string_len;
2769 int format_flags = 0;
2770 int codec_flags = 0;
2772 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate the muxer's fflags option string to detect +bitexact. */
2775 e = av_dict_get(of->opts, "fflags", NULL, 0);
2777 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2780 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Same for the encoder's "flags" option. */
2782 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2784 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2787 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
/* +2 for the separating space and the terminating NUL. */
2790 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2791 encoder_string = av_mallocz(encoder_string_len);
2792 if (!encoder_string)
2795 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2796 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2798 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2799 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2800 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2801 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time transcoding setup: bind complex-filtergraph outputs to source
 * streams, configure every output stream (stream-copy parameter propagation
 * or encoder/filtergraph setup), open decoders and encoders, write output
 * file headers and print the stream mapping.  Returns 0 on success or a
 * negative AVERROR.
 * NOTE(review): this view of the file is elided — many original lines
 * (braces, error paths, continue/break statements) are not shown. */
2804 static int transcode_init(void)
2806 int ret = 0, i, j, k;
2807 AVFormatContext *oc;
2810 char error[1024] = {0};
/* Bind outputs of complex filtergraphs to an input stream so downstream
 * code has a source stream index to refer to. */
2813 for (i = 0; i < nb_filtergraphs; i++) {
2814 FilterGraph *fg = filtergraphs[i];
2815 for (j = 0; j < fg->nb_outputs; j++) {
2816 OutputFilter *ofilter = fg->outputs[j];
2817 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2819 if (fg->nb_inputs != 1)
2821 for (k = nb_input_streams-1; k >= 0 ; k--)
2822 if (fg->inputs[0]->ist == input_streams[k])
2824 ofilter->ost->source_index = k;
2828 /* init framerate emulation */
2829 for (i = 0; i < nb_input_files; i++) {
2830 InputFile *ifile = input_files[i];
2831 if (ifile->rate_emu)
2832 for (j = 0; j < ifile->nb_streams; j++)
2833 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2836 /* for each output stream, we compute the right encoding parameters */
2837 for (i = 0; i < nb_output_streams; i++) {
2838 AVCodecContext *enc_ctx;
2839 AVCodecContext *dec_ctx = NULL;
2840 ost = output_streams[i];
2841 oc = output_files[ost->file_index]->ctx;
2842 ist = get_input_stream(ost);
2844 if (ost->attachment_filename)
/* For stream copy the muxer stream's codec context is configured
 * directly; for encoding, the separate encoder context is used. */
2847 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2850 dec_ctx = ist->dec_ctx;
2852 ost->st->disposition = ist->st->disposition;
2853 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2854 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its type in the file, mark it default. */
2856 for (j=0; j<oc->nb_streams; j++) {
2857 AVStream *st = oc->streams[j];
2858 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2861 if (j == oc->nb_streams)
2862 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2863 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2866 if (ost->stream_copy) {
2868 uint64_t extra_size;
2870 av_assert0(ist && !ost->filter);
/* 64-bit sum so the overflow check below is meaningful. */
2872 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2874 if (extra_size > INT_MAX) {
2875 return AVERROR(EINVAL);
2878 /* if stream_copy is selected, no need to decode or encode */
2879 enc_ctx->codec_id = dec_ctx->codec_id;
2880 enc_ctx->codec_type = dec_ctx->codec_type;
2882 if (!enc_ctx->codec_tag) {
2883 unsigned int codec_tag;
2884 if (!oc->oformat->codec_tag ||
2885 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2886 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2887 enc_ctx->codec_tag = dec_ctx->codec_tag;
2890 enc_ctx->bit_rate = dec_ctx->bit_rate;
2891 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2892 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2893 enc_ctx->field_order = dec_ctx->field_order;
2894 if (dec_ctx->extradata_size) {
2895 enc_ctx->extradata = av_mallocz(extra_size);
2896 if (!enc_ctx->extradata) {
2897 return AVERROR(ENOMEM);
2899 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2901 enc_ctx->extradata_size= dec_ctx->extradata_size;
2902 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2904 enc_ctx->time_base = ist->st->time_base;
2906 * Avi is a special case here because it supports variable fps but
2907 * having the fps and timebase differ significantly adds quite some
2910 if(!strcmp(oc->oformat->name, "avi")) {
2911 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2912 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2913 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2914 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2916 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2917 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2918 enc_ctx->ticks_per_frame = 2;
2919 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2920 && av_q2d(ist->st->time_base) < 1.0/500
2922 enc_ctx->time_base = dec_ctx->time_base;
2923 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2924 enc_ctx->time_base.den *= 2;
2925 enc_ctx->ticks_per_frame = 2;
2927 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2928 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2929 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2930 && strcmp(oc->oformat->name, "f4v")
2932 if( copy_tb<0 && dec_ctx->time_base.den
2933 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2934 && av_q2d(ist->st->time_base) < 1.0/500
2936 enc_ctx->time_base = dec_ctx->time_base;
2937 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode streams keep the decoder time base when it looks sane. */
2940 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2941 && dec_ctx->time_base.num < dec_ctx->time_base.den
2942 && dec_ctx->time_base.num > 0
2943 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2944 enc_ctx->time_base = dec_ctx->time_base;
2947 if (!ost->frame_rate.num)
2948 ost->frame_rate = ist->framerate;
2949 if(ost->frame_rate.num)
2950 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2952 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2953 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream side data, optionally dropping the display matrix when
 * the rotation was overridden on the command line. */
2955 if (ist->st->nb_side_data) {
2956 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2957 sizeof(*ist->st->side_data));
2958 if (!ost->st->side_data)
2959 return AVERROR(ENOMEM);
2961 ost->st->nb_side_data = 0;
2962 for (j = 0; j < ist->st->nb_side_data; j++) {
2963 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2964 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2966 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2969 sd_dst->data = av_malloc(sd_src->size);
2971 return AVERROR(ENOMEM);
2972 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2973 sd_dst->size = sd_src->size;
2974 sd_dst->type = sd_src->type;
2975 ost->st->nb_side_data++;
2979 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type parameter propagation for stream copy. */
2981 switch (enc_ctx->codec_type) {
2982 case AVMEDIA_TYPE_AUDIO:
2983 if (audio_volume != 256) {
2984 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2987 enc_ctx->channel_layout = dec_ctx->channel_layout;
2988 enc_ctx->sample_rate = dec_ctx->sample_rate;
2989 enc_ctx->channels = dec_ctx->channels;
2990 enc_ctx->frame_size = dec_ctx->frame_size;
2991 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2992 enc_ctx->block_align = dec_ctx->block_align;
2993 enc_ctx->initial_padding = dec_ctx->delay;
2994 enc_ctx->profile = dec_ctx->profile;
2995 #if FF_API_AUDIOENC_DELAY
2996 enc_ctx->delay = dec_ctx->delay;
/* MP3/AC3: clear bogus block_align values that break some muxers. */
2998 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2999 enc_ctx->block_align= 0;
3000 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3001 enc_ctx->block_align= 0;
3003 case AVMEDIA_TYPE_VIDEO:
3004 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3005 enc_ctx->width = dec_ctx->width;
3006 enc_ctx->height = dec_ctx->height;
3007 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3008 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3010 av_mul_q(ost->frame_aspect_ratio,
3011 (AVRational){ enc_ctx->height, enc_ctx->width });
3012 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3013 "with stream copy may produce invalid files\n");
3015 else if (ist->st->sample_aspect_ratio.num)
3016 sar = ist->st->sample_aspect_ratio;
3018 sar = dec_ctx->sample_aspect_ratio;
3019 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3020 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3021 ost->st->r_frame_rate = ist->st->r_frame_rate;
3023 case AVMEDIA_TYPE_SUBTITLE:
3024 enc_ctx->width = dec_ctx->width;
3025 enc_ctx->height = dec_ctx->height;
3027 case AVMEDIA_TYPE_UNKNOWN:
3028 case AVMEDIA_TYPE_DATA:
3029 case AVMEDIA_TYPE_ATTACHMENT:
/* Encoding path: find the encoder and build the filtergraph. */
3036 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3038 /* should only happen when a default codec is not present. */
3039 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3040 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3041 ret = AVERROR(EINVAL);
3045 set_encoder_id(output_files[ost->file_index], ost);
3048 if (qsv_transcode_init(ost))
3053 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3054 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
3056 fg = init_simple_filtergraph(ist, ost);
3057 if (configure_filtergraph(fg)) {
3058 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick the output frame rate: user/filter value, then input hints,
 * finally a 25 fps fallback with a warning. */
3063 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3064 if (!ost->frame_rate.num)
3065 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3066 if (ist && !ost->frame_rate.num)
3067 ost->frame_rate = ist->framerate;
3068 if (ist && !ost->frame_rate.num)
3069 ost->frame_rate = ist->st->r_frame_rate;
3070 if (ist && !ost->frame_rate.num) {
3071 ost->frame_rate = (AVRational){25, 1};
3072 av_log(NULL, AV_LOG_WARNING,
3074 "about the input framerate is available. Falling "
3075 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3076 "if you want a different framerate.\n",
3077 ost->file_index, ost->index);
3079 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3080 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3081 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3082 ost->frame_rate = ost->enc->supported_framerates[idx];
3084 // reduce frame rate for mpeg4 to be within the spec limits
3085 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3086 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3087 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull the negotiated parameters out of the filtergraph sink. */
3091 switch (enc_ctx->codec_type) {
3092 case AVMEDIA_TYPE_AUDIO:
3093 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3094 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3095 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3096 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3097 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3099 case AVMEDIA_TYPE_VIDEO:
3100 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3101 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3102 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3103 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3104 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3105 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3106 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale any pre-parsed forced keyframes into the encoder time base. */
3108 for (j = 0; j < ost->forced_kf_count; j++)
3109 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3111 enc_ctx->time_base);
3113 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3114 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3115 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3116 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3117 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3118 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3119 if (!strncmp(ost->enc->name, "libx264", 7) &&
3120 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3121 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3122 av_log(NULL, AV_LOG_WARNING,
3123 "No pixel format specified, %s for H.264 encoding chosen.\n"
3124 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3125 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3126 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3127 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3128 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3129 av_log(NULL, AV_LOG_WARNING,
3130 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3131 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3132 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3133 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3135 ost->st->avg_frame_rate = ost->frame_rate;
3138 enc_ctx->width != dec_ctx->width ||
3139 enc_ctx->height != dec_ctx->height ||
3140 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3141 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3144 if (ost->forced_keyframes) {
3145 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3146 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3147 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3149 av_log(NULL, AV_LOG_ERROR,
3150 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3153 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3154 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3155 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3156 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3158 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3159 // parse it only for static kf timings
3160 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3161 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3165 case AVMEDIA_TYPE_SUBTITLE:
3166 enc_ctx->time_base = (AVRational){1, 1000};
3167 if (!enc_ctx->width) {
3168 enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3169 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3172 case AVMEDIA_TYPE_DATA:
/* Evaluate a user-supplied -disposition flag string against this
 * local option table. */
3180 if (ost->disposition) {
3181 static const AVOption opts[] = {
3182 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3183 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3184 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3185 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3186 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3187 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3188 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3189 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3190 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3191 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3192 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3193 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3194 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3195 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3198 static const AVClass class = {
3200 .item_name = av_default_item_name,
3202 .version = LIBAVUTIL_VERSION_INT,
3204 const AVClass *pclass = &class;
3206 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3212 /* init input streams */
3213 for (i = 0; i < nb_input_streams; i++)
3214 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3215 for (i = 0; i < nb_output_streams; i++) {
3216 ost = output_streams[i];
3217 avcodec_close(ost->enc_ctx);
3222 /* open each encoder */
3223 for (i = 0; i < nb_output_streams; i++) {
3224 ret = init_output_stream(output_streams[i], error, sizeof(error));
3229 /* discard unused programs */
3230 for (i = 0; i < nb_input_files; i++) {
3231 InputFile *ifile = input_files[i];
3232 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3233 AVProgram *p = ifile->ctx->programs[j];
3234 int discard = AVDISCARD_ALL;
3236 for (k = 0; k < p->nb_stream_indexes; k++)
3237 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3238 discard = AVDISCARD_DEFAULT;
3241 p->discard = discard;
3245 /* open files and write file headers */
3246 for (i = 0; i < nb_output_files; i++) {
3247 oc = output_files[i]->ctx;
3248 oc->interrupt_callback = int_cb;
3249 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3250 snprintf(error, sizeof(error),
3251 "Could not write header for output file #%d "
3252 "(incorrect codec parameters ?): %s",
3253 i, av_err2str(ret));
3254 ret = AVERROR(EINVAL);
3257 // assert_avoptions(output_files[i]->opts);
3258 if (strcmp(oc->oformat->name, "rtp")) {
3264 /* dump the file output parameters - cannot be done before in case
3266 for (i = 0; i < nb_output_files; i++) {
3267 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3270 /* dump the stream mapping */
3271 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3272 for (i = 0; i < nb_input_streams; i++) {
3273 ist = input_streams[i];
3275 for (j = 0; j < ist->nb_filters; j++) {
3276 if (ist->filters[j]->graph->graph_desc) {
3277 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3278 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3279 ist->filters[j]->name);
3280 if (nb_filtergraphs > 1)
3281 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3282 av_log(NULL, AV_LOG_INFO, "\n");
3287 for (i = 0; i < nb_output_streams; i++) {
3288 ost = output_streams[i];
3290 if (ost->attachment_filename) {
3291 /* an attached file */
3292 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3293 ost->attachment_filename, ost->file_index, ost->index);
3297 if (ost->filter && ost->filter->graph->graph_desc) {
3298 /* output from a complex graph */
3299 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3300 if (nb_filtergraphs > 1)
3301 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3303 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3304 ost->index, ost->enc ? ost->enc->name : "?");
3308 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3309 input_streams[ost->source_index]->file_index,
3310 input_streams[ost->source_index]->st->index,
3313 if (ost->sync_ist != input_streams[ost->source_index])
3314 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3315 ost->sync_ist->file_index,
3316 ost->sync_ist->st->index);
3317 if (ost->stream_copy)
3318 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Report decoder/encoder implementation names; "native" when the
 * implementation name equals the codec descriptor name. */
3320 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3321 const AVCodec *out_codec = ost->enc;
3322 const char *decoder_name = "?";
3323 const char *in_codec_name = "?";
3324 const char *encoder_name = "?";
3325 const char *out_codec_name = "?";
3326 const AVCodecDescriptor *desc;
3329 decoder_name = in_codec->name;
3330 desc = avcodec_descriptor_get(in_codec->id);
3332 in_codec_name = desc->name;
3333 if (!strcmp(decoder_name, in_codec_name))
3334 decoder_name = "native";
3338 encoder_name = out_codec->name;
3339 desc = avcodec_descriptor_get(out_codec->id);
3341 out_codec_name = desc->name;
3342 if (!strcmp(encoder_name, out_codec_name))
3343 encoder_name = "native";
3346 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3347 in_codec_name, decoder_name,
3348 out_codec_name, encoder_name);
3350 av_log(NULL, AV_LOG_INFO, "\n");
3354 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3358 if (sdp_filename || want_sdp) {
3362 transcode_init_done = 1;
3367 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3368 static int need_output(void)
3372 for (i = 0; i < nb_output_streams; i++) {
3373 OutputStream *ost = output_streams[i];
3374 OutputFile *of = output_files[ost->file_index];
3375 AVFormatContext *os = output_files[ost->file_index]->ctx;
3377 if (ost->finished ||
3378 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3380 if (ost->frame_number >= ost->max_frames) {
3382 for (j = 0; j < of->ctx->nb_streams; j++)
3383 close_output_stream(output_streams[of->ost_index + j]);
3394 * Select the output stream to process.
3396 * @return selected output stream, or NULL if none available
3398 static OutputStream *choose_output(void)
3401 int64_t opts_min = INT64_MAX;
3402 OutputStream *ost_min = NULL;
3404 for (i = 0; i < nb_output_streams; i++) {
3405 OutputStream *ost = output_streams[i];
3406 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3407 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3409 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3410 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3412 if (!ost->finished && opts < opts_min) {
3414 ost_min = ost->unavailable ? NULL : ost;
/* Turn terminal echo on stdin on or off; a no-op when termios is absent. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) == 0) {
        /* set or clear the ECHO bit in the local-mode flags */
        tty.c_lflag = on ? (tty.c_lflag | ECHO) : (tty.c_lflag & ~ECHO);
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
3432 static int check_keyboard_interaction(int64_t cur_time)
/* Poll stdin for a single-key interactive command and act on it.
 * Returns AVERROR_EXIT when the user (or a signal) requested termination,
 * 0 otherwise.  NOTE(review): several lines of this listing are missing;
 * comments below describe only the visible code. */
3435 static int64_t last_time;
3436 if (received_nb_signals)
3437 return AVERROR_EXIT;
3438 /* read_key() returns 0 on EOF */
/* rate-limit keyboard polling to once per 100ms; never poll in daemon mode */
3439 if(cur_time - last_time >= 100000 && !run_as_daemon){
3441 last_time = cur_time;
3445 return AVERROR_EXIT;
/* '+'/'-' raise/lower log verbosity, 's' toggles the QP histogram */
3446 if (key == '+') av_log_set_level(av_log_get_level()+10);
3447 if (key == '-') av_log_set_level(av_log_get_level()-10);
3448 if (key == 's') qp_hist ^= 1;
3451 do_hex_dump = do_pkt_dump = 0;
3452 } else if(do_pkt_dump){
3456 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a filtergraph command line from stdin and send it
 * ('c' = first matching filter only) or queue it ('C' = all filters) */
3458 if (key == 'c' || key == 'C'){
3459 char buf[4096], target[64], command[256], arg[256] = {0};
3462 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3465 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3470 fprintf(stderr, "\n");
/* expect at least 3 fields: target, time, command (argument optional) */
3472 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3473 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3474 target, time, command, arg);
3475 for (i = 0; i < nb_filtergraphs; i++) {
3476 FilterGraph *fg = filtergraphs[i];
3479 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3480 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3481 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3482 } else if (key == 'c') {
3483 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3484 ret = AVERROR_PATCHWELCOME;
3486 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3488 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3493 av_log(NULL, AV_LOG_ERROR,
3494 "Parse error, at least 3 arguments were expected, "
3495 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle through codec debug-flag modes, skipping modes that
 * are unsupported here (see comment on line 3503) */
3498 if (key == 'd' || key == 'D'){
3501 debug = input_streams[0]->st->codec->debug<<1;
3502 if(!debug) debug = 1;
3503 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3510 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3515 fprintf(stderr, "\n");
3516 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3517 fprintf(stderr,"error parsing debug value\n");
/* apply the chosen debug flags to every decoder and encoder context */
3519 for(i=0;i<nb_input_streams;i++) {
3520 input_streams[i]->st->codec->debug = debug;
3522 for(i=0;i<nb_output_streams;i++) {
3523 OutputStream *ost = output_streams[i];
3524 ost->enc_ctx->debug = debug;
3526 if(debug) av_log_set_level(AV_LOG_DEBUG);
3527 fprintf(stderr,"debug=%d\n", debug);
/* any other key: print the interactive help text */
3530 fprintf(stderr, "key function\n"
3531 "? show this help\n"
3532 "+ increase verbosity\n"
3533 "- decrease verbosity\n"
3534 "c Send command to first matching filter supporting it\n"
3535 "C Send/Que command to all matching filters\n"
3536 "D cycle through available debug modes\n"
3537 "h dump packets/hex press to cycle through the 3 states\n"
3539 "s Show QP histogram\n"
/* Per-input-file reader thread: pulls packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue. */
3546 static void *input_thread(void *arg)
/* non-blocking sends are used for live (non-seekable) inputs */
3549 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3554 ret = av_read_frame(f->ctx, &pkt);
3556 if (ret == AVERROR(EAGAIN)) {
/* read error or EOF: propagate it to the receiving (main) side and stop */
3561 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3564 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full on a non-blocking send: retry (blocking) and warn the user
 * that -thread_queue_size may be too small */
3565 if (flags && ret == AVERROR(EAGAIN)) {
3567 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3568 av_log(f->ctx, AV_LOG_WARNING,
3569 "Thread message queue blocking; consider raising the "
3570 "thread_queue_size option (current value: %d)\n",
3571 f->thread_queue_size);
/* send failed for a non-EOF reason: report, drop the packet, and tell
 * the receiver about the error */
3574 if (ret != AVERROR_EOF)
3575 av_log(f->ctx, AV_LOG_ERROR,
3576 "Unable to send packet to main thread: %s\n",
3578 av_packet_unref(&pkt);
3579 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Stop, drain and destroy every input reader thread and its message queue. */
3587 static void free_input_threads(void)
3591 for (i = 0; i < nb_input_files; i++) {
3592 InputFile *f = input_files[i];
/* nothing to tear down for files that never got a queue/thread */
3595 if (!f || !f->in_thread_queue)
/* tell the sender side to stop, then drain any packets still queued so
 * their buffers are released */
3597 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3598 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3599 av_packet_unref(&pkt);
3601 pthread_join(f->thread, NULL);
3603 av_thread_message_queue_free(&f->in_thread_queue);
/* Spawn one reader thread per input file.  With a single input no threads
 * are created: the main loop reads it directly. */
3607 static int init_input_threads(void)
3611 if (nb_input_files == 1)
3614 for (i = 0; i < nb_input_files; i++) {
3615 InputFile *f = input_files[i];
/* non-seekable (live) inputs use non-blocking queue sends, except the
 * lavfi pseudo-demuxer which has no pb and is effectively seekable */
3617 if (f->ctx->pb ? !f->ctx->pb->seekable :
3618 strcmp(f->ctx->iformat->name, "lavfi"))
3619 f->non_blocking = 1;
3620 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3621 f->thread_queue_size, sizeof(AVPacket));
/* pthread_create returns an errno-style code directly (not via errno) */
3625 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3626 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3627 av_thread_message_queue_free(&f->in_thread_queue);
3628 return AVERROR(ret);
3634 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3636 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3638 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for one input file; with several inputs this goes
 * through the per-file reader thread, otherwise straight to av_read_frame().
 * NOTE(review): the guard around the loop below (presumably -re rate
 * emulation) is missing from this listing — confirm against the full file. */
3642 static int get_input_packet(InputFile *f, AVPacket *pkt)
3646 for (i = 0; i < f->nb_streams; i++) {
3647 InputStream *ist = input_streams[f->ist_index + i];
/* compare the stream dts (rescaled to microseconds) against elapsed
 * wallclock time; too early means the caller must retry later */
3648 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3649 int64_t now = av_gettime_relative() - ist->start;
3651 return AVERROR(EAGAIN);
3656 if (nb_input_files > 1)
3657 return get_input_packet_mt(f, pkt);
3659 return av_read_frame(f->ctx, pkt);
3662 static int got_eagain(void)
3665 for (i = 0; i < nb_output_streams; i++)
3666 if (output_streams[i]->unavailable)
3671 static void reset_eagain(void)
3674 for (i = 0; i < nb_input_files; i++)
3675 input_files[i]->eagain = 0;
3676 for (i = 0; i < nb_output_streams; i++)
3677 output_streams[i]->unavailable = 0;
3680 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3681 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3682 AVRational time_base)
3688 return tmp_time_base;
3691 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3694 return tmp_time_base;
/* Rewind one input file for -stream_loop: seek back to its start time,
 * flush all active decoders, and grow ifile->duration by the length of the
 * material just played so subsequent packets can be offset. */
3700 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3703 AVCodecContext *avctx;
3704 int i, ret, has_audio = 0;
3705 int64_t duration = 0;
3707 ret = av_seek_frame(is, -1, is->start_time, 0);
3711 for (i = 0; i < ifile->nb_streams; i++) {
3712 ist = input_streams[ifile->ist_index + i];
3713 avctx = ist->dec_ctx;
/* flush each active decoder so no stale frames survive the seek */
3716 if (ist->decoding_needed) {
3717 process_input_packet(ist, NULL, 1);
3718 avcodec_flush_buffers(avctx);
3721 /* duration is the length of the last frame in a stream
3722 * when audio stream is present we don't care about
3723 * last video frame length because it's not defined exactly */
3724 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3728 for (i = 0; i < ifile->nb_streams; i++) {
3729 ist = input_streams[ifile->ist_index + i];
3730 avctx = ist->dec_ctx;
/* audio: length of the last frame derived from the decoded sample count */
3733 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3734 AVRational sample_rate = {1, avctx->sample_rate};
3736 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* otherwise: one frame at the forced or average frame rate, falling back
 * to a single time-base tick when neither is known */
3740 if (ist->framerate.num) {
3741 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3742 } else if (ist->st->avg_frame_rate.num) {
3743 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3744 } else duration = 1;
/* first loop iteration: adopt this stream's time base for the file */
3746 if (!ifile->duration)
3747 ifile->time_base = ist->st->time_base;
3748 /* the total duration of the stream, max_pts - min_pts is
3749 * the duration of the stream without the last frame */
3750 duration += ist->max_pts - ist->min_pts;
3751 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* a finite -stream_loop count is decremented once per rewind */
3755 if (ifile->loop > 0)
3763 * - 0 -- one packet was read and processed
3764 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3765 * this function should be called again
3766 * - AVERROR_EOF -- this function should not be called again
/* Read and process one packet from the given input file: handle EOF and
 * -stream_loop rewinds, timestamp wrap/offset/discontinuity correction,
 * stream side data propagation, then hand the packet to the decoders or
 * streamcopy path via process_input_packet(). */
3768 static int process_input(int file_index)
3770 InputFile *ifile = input_files[file_index];
3771 AVFormatContext *is;
3779 ret = get_input_packet(ifile, &pkt);
3781 if (ret == AVERROR(EAGAIN)) {
/* on read failure/EOF with -stream_loop active, rewind and try again */
3785 if (ret < 0 && ifile->loop) {
3786 if ((ret = seek_to_start(ifile, is)) < 0)
3788 ret = get_input_packet(ifile, &pkt);
3791 if (ret != AVERROR_EOF) {
3792 print_error(is->filename, ret);
/* real EOF: flush each decoder of this file with a NULL packet */
3797 for (i = 0; i < ifile->nb_streams; i++) {
3798 ist = input_streams[ifile->ist_index + i];
3799 if (ist->decoding_needed) {
3800 ret = process_input_packet(ist, NULL, 0);
3805 /* mark all outputs that don't go through lavfi as finished */
3806 for (j = 0; j < nb_output_streams; j++) {
3807 OutputStream *ost = output_streams[j];
3809 if (ost->source_index == ifile->ist_index + i &&
3810 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3811 finish_output_stream(ost);
/* report EOF as EAGAIN so the caller moves on to other inputs */
3815 ifile->eof_reached = 1;
3816 return AVERROR(EAGAIN);
3822 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3823 is->streams[pkt.stream_index]);
3825 /* the following test is needed in case new streams appear
3826 dynamically in stream : we ignore them */
3827 if (pkt.stream_index >= ifile->nb_streams) {
3828 report_new_stream(file_index, &pkt);
3829 goto discard_packet;
3832 ist = input_streams[ifile->ist_index + pkt.stream_index];
3834 ist->data_size += pkt.size;
3838 goto discard_packet;
3840 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3841 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3846 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3847 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3848 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3849 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3850 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3851 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3852 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3853 av_ts2str(input_files[ist->file_index]->ts_offset),
3854 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* timestamp wrap correction for containers with < 64-bit pts wrap */
3857 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3858 int64_t stime, stime2;
3859 // Correcting starttime based on the enabled streams
3860 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3861 // so we instead do it here as part of discontinuity handling
3862 if ( ist->next_dts == AV_NOPTS_VALUE
3863 && ifile->ts_offset == -is->start_time
3864 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3865 int64_t new_start_time = INT64_MAX;
3866 for (i=0; i<is->nb_streams; i++) {
3867 AVStream *st = is->streams[i];
3868 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3870 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3872 if (new_start_time > is->start_time) {
3873 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3874 ifile->ts_offset = -new_start_time;
3878 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3879 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3880 ist->wrap_correction_done = 1;
/* timestamps beyond half the wrap range are unwrapped by one period */
3882 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3883 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3884 ist->wrap_correction_done = 0;
3886 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3887 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3888 ist->wrap_correction_done = 0;
3892 /* add the stream-global side data to the first packet */
3893 if (ist->nb_packets == 1) {
3894 if (ist->st->nb_side_data)
3895 av_packet_split_side_data(&pkt);
3896 for (i = 0; i < ist->st->nb_side_data; i++) {
3897 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* do not duplicate side data the packet already carries */
3900 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* autorotate consumes the display matrix elsewhere; skip copying it */
3902 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3905 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3909 memcpy(dst_data, src_sd->data, src_sd->size);
/* apply the input file's timestamp offset, then the per-stream -itsscale */
3913 if (pkt.dts != AV_NOPTS_VALUE)
3914 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3915 if (pkt.pts != AV_NOPTS_VALUE)
3916 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3918 if (pkt.pts != AV_NOPTS_VALUE)
3919 pkt.pts *= ist->ts_scale;
3920 if (pkt.dts != AV_NOPTS_VALUE)
3921 pkt.dts *= ist->ts_scale;
/* inter-stream discontinuity check against the file's last seen ts */
3923 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3924 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3925 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3926 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3927 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3928 int64_t delta = pkt_dts - ifile->last_ts;
3929 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3930 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3931 ifile->ts_offset -= delta;
3932 av_log(NULL, AV_LOG_DEBUG,
3933 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3934 delta, ifile->ts_offset);
3935 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3936 if (pkt.pts != AV_NOPTS_VALUE)
3937 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and
 * keep min/max pts so the next loop's duration is known */
3941 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3942 if (pkt.pts != AV_NOPTS_VALUE) {
3943 pkt.pts += duration;
3944 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3945 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3948 if (pkt.dts != AV_NOPTS_VALUE)
3949 pkt.dts += duration;
/* per-stream discontinuity handling against the predicted next dts */
3951 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3952 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3953 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3954 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3956 int64_t delta = pkt_dts - ist->next_dts;
3957 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3958 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3959 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3960 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3961 ifile->ts_offset -= delta;
3962 av_log(NULL, AV_LOG_DEBUG,
3963 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3964 delta, ifile->ts_offset);
3965 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3966 if (pkt.pts != AV_NOPTS_VALUE)
3967 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* non-DISCONT formats: timestamps beyond the error threshold are dropped
 * rather than used to adjust the offset */
3970 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3971 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3972 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3973 pkt.dts = AV_NOPTS_VALUE;
3975 if (pkt.pts != AV_NOPTS_VALUE){
3976 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3977 delta = pkt_pts - ist->next_dts;
3978 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3979 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3980 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3981 pkt.pts = AV_NOPTS_VALUE;
/* remember the last dts of this file for inter-stream checks above */
3987 if (pkt.dts != AV_NOPTS_VALUE)
3988 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3991 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3992 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3993 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3994 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3995 av_ts2str(input_files[ist->file_index]->ts_offset),
3996 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video streams in sync, then hand the packet to processing */
3999 sub2video_heartbeat(ist, pkt.pts);
4001 process_input_packet(ist, &pkt, 0);
4004 av_packet_unref(&pkt);
4010 * Perform a step of transcoding for the specified filter graph.
4012 * @param[in] graph filter graph to consider
4013 * @param[out] best_ist input stream where a frame would allow to continue
4014 * @return 0 for success, <0 for error
/* Perform one transcoding step for a filtergraph: request a frame from its
 * oldest output; on EAGAIN report (via *best_ist) the input stream whose
 * buffersrc has failed the most requests, so the caller can feed it. */
4016 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4019 int nb_requests, nb_requests_max = 0;
4020 InputFilter *ifilter;
4024 ret = avfilter_graph_request_oldest(graph->graph);
/* success: pull whatever frames the graph produced */
4026 return reap_filters(0);
/* graph fully flushed: reap the remainder and close all its outputs */
4028 if (ret == AVERROR_EOF) {
4029 ret = reap_filters(1);
4030 for (i = 0; i < graph->nb_outputs; i++)
4031 close_output_stream(graph->outputs[i]->ost);
4034 if (ret != AVERROR(EAGAIN))
/* pick the starved input: skip files already at EOF/EAGAIN, otherwise
 * choose the one with the most failed buffersrc requests */
4037 for (i = 0; i < graph->nb_inputs; i++) {
4038 ifilter = graph->inputs[i];
4040 if (input_files[ist->file_index]->eagain ||
4041 input_files[ist->file_index]->eof_reached)
4043 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4044 if (nb_requests > nb_requests_max) {
4045 nb_requests_max = nb_requests;
/* no feedable input found: mark every output of this graph unavailable */
4051 for (i = 0; i < graph->nb_outputs; i++)
4052 graph->outputs[i]->ost->unavailable = 1;
4058 * Run a single step of transcoding.
4060 * @return 0 for success, <0 for error
/* Run a single step of transcoding: choose an output stream, obtain input
 * for it (directly or via its filtergraph), process one packet, then reap
 * frames from the filters. */
4062 static int transcode_step(void)
4068 ost = choose_output();
4075 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* complex-graph output: ask the graph which input stream it is missing */
4080 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* simple path: the input stream is the output's direct source */
4085 av_assert0(ost->source_index >= 0);
4086 ist = input_streams[ost->source_index];
4089 ret = process_input(ist->file_index);
/* EAGAIN from the source file makes this output temporarily unavailable */
4090 if (ret == AVERROR(EAGAIN)) {
4091 if (input_files[ist->file_index]->eagain)
4092 ost->unavailable = 1;
/* EOF is not an error at this level; anything else propagates */
4097 return ret == AVERROR_EOF ? 0 : ret;
4099 return reap_filters(0);
4103 * The following code is the main loop of the file converter
4105 static int transcode(void)
/* Main conversion loop: initialize, iterate transcode_step() until no
 * output is wanted or an error/signal occurs, then flush decoders, write
 * trailers, print the final report, and release per-stream resources. */
4108 AVFormatContext *os;
4111 int64_t timer_start;
4112 int64_t total_packets_written = 0;
4114 ret = transcode_init();
4118 if (stdin_interaction) {
4119 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4122 timer_start = av_gettime_relative();
4125 if ((ret = init_input_threads()) < 0)
/* loop until a termination signal arrives or there is nothing left to do */
4129 while (!received_sigterm) {
4130 int64_t cur_time= av_gettime_relative();
4132 /* if 'q' pressed, exits */
4133 if (stdin_interaction)
4134 if (check_keyboard_interaction(cur_time) < 0)
4137 /* check if there's any stream where output is still needed */
4138 if (!need_output()) {
4139 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4143 ret = transcode_step();
4144 if (ret < 0 && ret != AVERROR_EOF) {
4146 av_strerror(ret, errbuf, sizeof(errbuf));
4148 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4152 /* dump report by using the output first video and audio streams */
4153 print_report(0, timer_start, cur_time);
4156 free_input_threads();
4159 /* at the end of stream, we must flush the decoder buffers */
4160 for (i = 0; i < nb_input_streams; i++) {
4161 ist = input_streams[i];
4162 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4163 process_input_packet(ist, NULL, 0);
4170 /* write the trailer if needed and close file */
4171 for (i = 0; i < nb_output_files; i++) {
4172 os = output_files[i]->ctx;
4173 if ((ret = av_write_trailer(os)) < 0) {
4174 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4180 /* dump report by using the first video and audio streams */
4181 print_report(1, timer_start, av_gettime_relative());
4183 /* close each encoder */
4184 for (i = 0; i < nb_output_streams; i++) {
4185 ost = output_streams[i];
4186 if (ost->encoding_needed) {
4187 av_freep(&ost->enc_ctx->stats_in);
4189 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail if nothing was ever written */
4192 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4193 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4197 /* close each decoder */
4198 for (i = 0; i < nb_input_streams; i++) {
4199 ist = input_streams[i];
4200 if (ist->decoding_needed) {
4201 avcodec_close(ist->dec_ctx);
4202 if (ist->hwaccel_uninit)
4203 ist->hwaccel_uninit(ist->dec_ctx);
/* cleanup path (also reached on error): stop reader threads and free all
 * per-output-stream allocations */
4212 free_input_threads();
4215 if (output_streams) {
4216 for (i = 0; i < nb_output_streams; i++) {
4217 ost = output_streams[i];
4220 if (fclose(ost->logfile))
4221 av_log(NULL, AV_LOG_ERROR,
4222 "Error closing logfile, loss of information possible: %s\n",
4223 av_err2str(AVERROR(errno)));
4224 ost->logfile = NULL;
4226 av_freep(&ost->forced_kf_pts);
4227 av_freep(&ost->apad);
4228 av_freep(&ost->disposition);
4229 av_dict_free(&ost->encoder_opts);
4230 av_dict_free(&ost->sws_dict);
4231 av_dict_free(&ost->swr_opts);
4232 av_dict_free(&ost->resample_opts);
/* CPU user time consumed by this process, in microseconds (for -benchmark). */
4240 static int64_t getutime(void)
4243 struct rusage rusage;
4245 getrusage(RUSAGE_SELF, &rusage);
4246 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4247 #elif HAVE_GETPROCESSTIMES
4249 FILETIME c, e, k, u;
4250 proc = GetCurrentProcess();
4251 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME counts 100ns units; divide by 10 to get microseconds */
4252 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
/* no CPU-time API: fall back to a monotonic wallclock value */
4254 return av_gettime_relative();
/* Peak memory usage of this process in bytes, for the -benchmark report. */
4258 static int64_t getmaxrss(void)
4260 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4261 struct rusage rusage;
4262 getrusage(RUSAGE_SELF, &rusage);
/* ru_maxrss is reported in kilobytes; convert to bytes */
4263 return (int64_t)rusage.ru_maxrss * 1024;
4264 #elif HAVE_GETPROCESSMEMORYINFO
4266 PROCESS_MEMORY_COUNTERS memcounters;
4267 proc = GetCurrentProcess();
4268 memcounters.cb = sizeof(memcounters);
4269 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4270 return memcounters.PeakPagefileUsage;
/* av_log callback that discards all messages; installed below when the
 * undocumented -d (daemon) flag is given so nothing reaches stderr. */
4276 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: register libraries, parse the command line, open all
 * inputs/outputs, run transcode(), report timing and decode-error stats. */
4280 int main(int argc, char **argv)
4285 register_exit(ffmpeg_cleanup);
4287 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4289 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4290 parse_loglevel(argc, argv, options);
/* undocumented -d flag: run as daemon and silence the log callback */
4292 if(argc>1 && !strcmp(argv[1], "-d")){
4294 av_log_set_callback(log_callback_null);
/* register every codec/device/filter and initialize networking */
4299 avcodec_register_all();
4301 avdevice_register_all();
4303 avfilter_register_all();
4305 avformat_network_init();
4307 show_banner(argc, argv, options);
4311 /* parse options and open all input/output files */
4312 ret = ffmpeg_parse_options(argc, argv);
4316 if (nb_output_files <= 0 && nb_input_files == 0) {
4318 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4322 /* file converter / grab */
4323 if (nb_output_files <= 0) {
4324 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4328 // if (nb_input_files == 0) {
4329 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* time the whole run in CPU user time for the bench report below */
4333 current_time = ti = getutime();
4334 if (transcode() < 0)
4336 ti = getutime() - ti;
4338 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4340 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4341 decode_error_stat[0], decode_error_stat[1]);
/* fail the run when the decode error ratio exceeded -max_error_rate */
4342 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals death-by-signal to the caller */
4345 exit_program(received_nb_signals ? 255 : main_return_code);
4346 return main_return_code;