2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identity used by the cmdutils help/version machinery. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* -vstats output file; lazily opened the first time do_video_stats() runs. */
static FILE *vstats_file;

/* Constant names usable inside -force_key_frames expressions
   (initializer entries elided in this view). */
const char *const forced_keyframes_const_names[] = {

static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon  = 0;
static int nb_frames_dup = 0;    /* frames duplicated for A/V sync */
static int nb_frames_drop = 0;   /* frames dropped for A/V sync */
static int64_t decode_error_stat[2];

static int current_time;
AVIOContext *progress_avio = NULL;

/* Shared scratch buffer for encoded subtitles; freed in ffmpeg_cleanup(). */
static uint8_t *subtitle_out;

/* Global stream/file tables, also referenced from the option/filter code
   (non-static on purpose). */
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
int nb_input_files = 0;

OutputStream **output_streams = NULL;
int nb_output_streams = 0;
OutputFile **output_files = NULL;
int nb_output_files = 0;

FilterGraph **filtergraphs;

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* saved tty state, restored on exit */
static int restore_tty;

static void free_input_threads(void);

/*
 * Convert subtitles to video with alpha to insert them in filter graphs.
 * This is a temporary solution until libavfilter gets real subtitles support.
 */
/* (Re)allocate the RGB32 canvas frame used to render subtitles as video and
   clear it to all-zero (transparent). Returns <0 on allocation failure
   (error-return lines elided in this view). */
static int sub2video_get_blank_frame(InputStream *ist)
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer decoder-reported dimensions; fall back to the precomputed
       sub2video size when the decoder reports none. */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
    /* Zero the whole first plane: transparent black canvas.
       NOTE(review): assumes a non-negative linesize — confirm. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas,
   expanding 8-bit palette indices through the 32-bit palette. Rectangles
   that are not bitmaps or do not fit inside w x h are warned about
   (early-return lines elided in this view). */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
    uint32_t *pal, *dst2;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
    /* 4 bytes per RGB32 pixel. */
    dst += r->y * dst_linesize + r->x * 4;
    src = r->pict.data[0];                  /* palette indices */
    pal = (uint32_t *)r->pict.data[1];      /* RGB32 palette */
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        src += r->pict.linesize[0];
/* Push the current sub2video canvas frame (stamped with pts) into every
   filtergraph input fed by this stream; KEEP_REF keeps ownership here so
   the same frame can be re-sent by the heartbeat. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
    AVFrame *frame = ist->sub2video.frame;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                     AV_BUFFERSRC_FLAG_KEEP_REF |
                                     AV_BUFFERSRC_FLAG_PUSH);
/* Render an AVSubtitle onto a fresh blank canvas and push it downstream.
   With sub == NULL the canvas is pushed empty (clears the display);
   the NULL branch itself is elided in this view. */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
    AVFrame *frame = ist->sub2video.frame;
    int64_t pts, end_pts;

    /* Display window: sub->pts plus start/end offsets (milliseconds),
       rescaled from AV_TIME_BASE to the stream time base. */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
        pts       = ist->sub2video.end_pts;
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
/* Keep subtitle buffersrcs fed while other streams advance, so filters
   (e.g. overlay) waiting for a subtitle frame do not stall the graph. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
    InputFile *infile = input_files[ist->file_index];

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
        /* Re-render (clear) once the current subtitle has expired, or if no
           canvas exists yet. */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        sub2video_push_ref(ist2, pts2);
/* End-of-stream: emit a final clearing frame if a subtitle is still
   displayed, then send NULL (EOF) to every buffersrc of this stream. */
static void sub2video_flush(InputStream *ist)
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        av_buffersrc_add_frame(ist->filters[i]->filter, NULL);

/* end of sub2video hack */
/* Restore the saved terminal mode; intended to be callable from the exit
   path and signal handling (guard conditions elided in this view). */
static void term_exit_sigsafe(void)
        tcsetattr (0, TCSANOW, &oldtty);
    av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Shutdown bookkeeping; the first four are written from signal handlers,
   hence volatile. */
static volatile int received_sigterm = 0;    /* last signal number received */
static volatile int received_nb_signals = 0; /* how many termination signals */
static volatile int transcode_init_done = 0;
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;             /* process exit status */
/* POSIX signal handler: record which signal arrived and how many times;
   after more than three signals, print a message and hard-exit (exit call
   elided in this view). */
sigterm_handler(int sig)
    received_sigterm = sig;
    received_nb_signals++;
    if(received_nb_signals > 3) {
        /* write() is async-signal-safe, unlike stdio. */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
   sigterm_handler() used for POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): interior fragment of terminal/signal setup (term_init):
   switches the tty out of canonical/echo mode for single-key reads and
   installs sigterm_handler for the usual termination signals. */
    if (tcgetattr (0, &tty) == 0) {
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tcsetattr (0, TCSANOW, &tty);
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).  */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
    signal(SIGXCPU, sigterm_handler); /* CPU-time limit exceeded */
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
/* read a key without blocking */
/* Two platform paths (largely elided here): select() on stdin for POSIX,
   PeekNamedPipe/console API on Windows. */
static int read_key(void)
    n = select(1, &rfds, NULL, NULL, &tv);
# if HAVE_PEEKNAMEDPIPE
    static HANDLE input_handle;
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down all global state before exit: filtergraphs, output files and
   streams, input threads/files/streams, then the global tables; finally
   report why we are exiting. `ret` is the pending exit status. */
static void ffmpeg_cleanup(int ret)
    int maxrss = getmaxrss() / 1024;
    av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);

    /* Free every filtergraph with its named inputs/outputs. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* Close muxer contexts (closing the file unless AVFMT_NOFILE). */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    /* Per-output-stream resources: bitstream filter chain, cached frames,
       parser, keyframe forcing state, encoder context. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc;

        bsfc = ost->bitstream_filters;
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    free_input_threads();
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/* Remove from *a every key that also appears in b — used to strip options
   that were already consumed elsewhere. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Fail hard (FATAL log; exit elided in this view) if any user-supplied
   option in m was not consumed by the library. */
void assert_avoptions(AVDictionary *m)
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
598 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: log CPU time elapsed since the previous call,
   labeled with the printf-style fmt. NOTE(review): the fmt==NULL path
   (reset only, no log) is elided here — confirm. */
static void update_benchmark(const char *fmt, ...)
    if (do_benchmark_all) {
        int64_t t = getutime();
            vsnprintf(buf, sizeof(buf), fmt, va);
            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* OR the `this_stream` finished-flags into ost itself and `others` into
   every other output stream — used to shut everything down after a fatal
   muxing error. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
/* Final per-packet stage before the muxer: back-fill stream extradata,
   enforce -frames limits, harvest encoder quality side data, apply
   bitstream filters, sanitize DTS/PTS monotonicity, then interleave-write.
   Takes ownership of pkt (always unreffed). Many intermediate lines are
   elided in this view. */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    /* Stream-copy uses the muxer stream's codec context, encode uses enc_ctx. */
    AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;

    /* Propagate encoder extradata to the muxer stream if it has none yet. */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
    /* -vsync drop / negative async: discard timestamps entirely. */
    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Quality/PSNR stats attached by the encoder as packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
    av_packet_split_side_data(pkt);

        /* Run the packet through each configured bitstream filter in turn. */
        AVPacket new_pkt = *pkt;
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if(a == 0 && new_pkt.data != pkt->data) {
            uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
            memcpy(t, new_pkt.data, new_pkt.size);
            memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        pkt->side_data = NULL;
        pkt->side_data_elems = 0;
        av_packet_unref(pkt);
        new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                       av_buffer_default_free, NULL, 0);
            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; reconstruct a plausible dts if it does. */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   ost->file_index, ost->st->index);
            /* pick the median of {pts, dts, last_mux_dts + 1} */
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Enforce (strictly, unless TS_NONSTRICT) increasing DTS. */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
    ret = av_interleaved_write_frame(s, pkt);
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    av_packet_unref(pkt);
/* Mark the encoder of ost finished and, where applicable (guard elided in
   this view), cap the file's recording time at this stream's end time. */
static void close_output_stream(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
/* If -t recording time of the owning file has been reached, close this
   stream; the elided return values indicate whether encoding may continue. */
static int check_recording_time(OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
/* Encode one audio frame and hand the resulting packet(s) to write_frame().
   sync_opts tracks the expected pts of the next frame in samples. */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
    AVCodecContext *enc = ost->enc_ctx;

    av_init_packet(&pkt);

    if (!check_recording_time(ost))
    /* Trust our own sample counter when input has no pts or async < 0. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    update_benchmark(NULL);
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* Encoder time base -> muxer stream time base. */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        write_frame(s, &pkt, ost);
/* Encode one subtitle into the shared subtitle_out buffer and mux it.
   DVB subtitles are emitted twice (draw + clear), hence the nb loop. */
static void do_subtitle_out(AVFormatContext *s,
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        /* Lazily allocate the shared 1 MiB output buffer. */
        subtitle_out = av_malloc(subtitle_out_max_size);
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
                pkt.pts += 90 * sub->start_display_time;
                pkt.pts += 90 * sub->end_display_time;
        write_frame(s, &pkt, ost);
/* Video output pipeline: decide (per the active vsync mode) how many times
   this picture — or the previous one — must be emitted, then encode and mux
   each copy, handling forced keyframes and two-pass stats. Many control-flow
   lines are elided in this view; statement order here is load-bearing. */
static void do_video_out(AVFormatContext *s,
                         AVFrame *next_picture,
    int ret, format_video_sync;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Nominal frame duration in encoder time-base units, from the filter's
       advertised frame rate... */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    /* ...capped by the requested output frame rate... */
    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* ...or taken from the frame's own packet duration when usable. */
    if (!ost->filters_script &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));

    /* Median of the last three dup counts as the flush-time estimate. */
    nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                      ost->last_nb0_frames[1],
                                      ost->last_nb0_frames[2]);

    delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
    delta  = delta0 + duration;

    /* by default, we output a single frame */
    nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO) {
        if(!strcmp(s->oformat->name, "avi")) {
            format_video_sync = VSYNC_VFR;
            format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            && format_video_sync == VSYNC_CFR
            && input_files[ist->file_index]->ctx->nb_streams == 1
            && input_files[ist->file_index]->input_ts_offset == 0) {
            format_video_sync = VSYNC_VSCFR;
        if (format_video_sync == VSYNC_CFR && copy_ts) {
            format_video_sync = VSYNC_VSCFR;
    ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        format_video_sync != VSYNC_PASSTHROUGH &&
        format_video_sync != VSYNC_DROP) {
        if (delta0 < -0.6) {
            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ost->sync_opts;

    /* Per-mode decision on nb_frames / nb0_frames (several case labels and
       assignments elided). */
    switch (format_video_sync) {
        if (ost->frame_number == 0 && delta0 >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            ost->sync_opts = lrint(sync_ipts);
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
        } else if (delta < -1.1)
        else if (delta > 1.1) {
            nb_frames = lrintf(delta);
                nb0_frames = lrintf(delta0 - 0.6);
        else if (delta > 0.6)
            ost->sync_opts = lrint(sync_ipts);
    case VSYNC_PASSTHROUGH:
        ost->sync_opts = lrint(sync_ipts);

    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* Shift the dup-count history window. */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);

        /* First nb0_frames iterations re-emit the previous frame. */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
            in_picture = next_picture;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
        if (ost->frame_number >= ost->max_frames)

#if FF_API_LAVF_FMT_RAWPICTURE
        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older */
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data   = (uint8_t *)in_picture;
            pkt.size   =  sizeof(AVPicture);
            pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
            int got_packet, forced_keyframe = 0;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* Keyframe forcing: explicit pts list, expression, or "source". */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);

            update_benchmark(NULL);
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));

                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;

    if (vstats_filename && frame_size)
        do_video_stats(ost, frame_size);

    /* Remember the last picture so VFR dup logic can re-emit it. */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
        av_frame_free(&ost->last_frame);
1252 static double psnr(double d)
1254 return -10.0 * log10(d);
/* Append one line of per-frame statistics (quality, optional PSNR, size,
   time, bitrates, picture type) to the -vstats file. */
static void do_video_stats(OutputStream *ost, int frame_size)
    AVCodecContext *enc;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
        vstats_file = fopen(vstats_filename, "w");
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark ost, and every other stream of the same output file, as finished for
 * both the encoder and the muxer, so no further packets are produced. */
1295 static void finish_output_stream(OutputStream *ost)
1297     OutputFile *of = output_files[ost->file_index];
1300     ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1303     for (i = 0; i < of->ctx->nb_streams; i++)
1304         output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1309  * Get and encode new output from any of the filtergraphs, without causing
1312  * @return 0 for success, <0 for severe errors
/* Drains every output stream's buffersink (no request is issued, only frames
 * already available are taken) and hands each frame to do_video_out() /
 * do_audio_out().  With flush set, an EOF from a video sink still triggers a
 * final do_video_out(NULL) call so the fps logic can emit trailing frames. */
1314 static int reap_filters(int flush)
1316     AVFrame *filtered_frame = NULL;
1319     /* Reap all buffers present in the buffer sinks */
1320     for (i = 0; i < nb_output_streams; i++) {
1321         OutputStream *ost = output_streams[i];
1322         OutputFile    *of = output_files[ost->file_index];
1323         AVFilterContext *filter;
1324         AVCodecContext *enc = ost->enc_ctx;
1329         filter = ost->filter->filter;
/* Lazily allocate the per-stream frame used to receive filter output. */
1331         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1332             return AVERROR(ENOMEM);
1334         filtered_frame = ost->filtered_frame;
1337             double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1338             ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1339                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
1341                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1342                     av_log(NULL, AV_LOG_WARNING,
1343                            "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1344                 } else if (flush && ret == AVERROR_EOF) {
1345                     if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1346                         do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1350             if (ost->finished) {
1351                 av_frame_unref(filtered_frame);
1354             if (filtered_frame->pts != AV_NOPTS_VALUE) {
1355                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1356                 AVRational tb = enc->time_base;
/* Temporarily scale up the timebase denominator to keep extra fractional
 * precision in float_pts (up to 16 extra bits). */
1357                 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1359                 tb.den <<= extra_bits;
1361                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1362                     av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1363                 float_pts /= 1 << extra_bits;
1364                 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1365                 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1367                 filtered_frame->pts =
1368                     av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1369                     av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1371             //if (ost->source_index >= 0)
1372             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1374             switch (filter->inputs[0]->type) {
1375             case AVMEDIA_TYPE_VIDEO:
1376                 if (!ost->frame_aspect_ratio.num)
1377                     enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1380                     av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1381                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1383                            enc->time_base.num, enc->time_base.den);
1386                 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1388             case AVMEDIA_TYPE_AUDIO:
/* Refuse un-normalized channel-count changes unless the encoder advertises
 * support for parameter changes. */
1389                 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1390                     enc->channels != av_frame_get_channels(filtered_frame)) {
1391                     av_log(NULL, AV_LOG_ERROR,
1392                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1395                 do_audio_out(of->ctx, ost, filtered_frame);
1398                 // TODO support subtitle filters
1402             av_frame_unref(filtered_frame);
/* Print the end-of-run summary: total bytes per media type, muxing overhead
 * percentage, and (at verbose level) per-input/per-output stream packet and
 * frame counts.  Also warns when nothing at all was encoded. */
1409 static void print_final_stats(int64_t total_size)
1411     uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1412     uint64_t subtitle_size = 0;
1413     uint64_t data_size = 0;
1414     float percent = -1.0;
/* Accumulate per-type output sizes and global-header (extradata) bytes. */
1418     for (i = 0; i < nb_output_streams; i++) {
1419         OutputStream *ost = output_streams[i];
1420         switch (ost->enc_ctx->codec_type) {
1421             case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1422             case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1423             case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1424             default:                 other_size += ost->data_size; break;
1426         extra_size += ost->enc_ctx->extradata_size;
1427         data_size  += ost->data_size;
/* NOTE(review): this mixes AV_CODEC_FLAG_PASS1 with the deprecated
 * CODEC_FLAG_PASS2 name — presumably both refer to the same bit values,
 * but confirm against later upstream cleanups. */
1428         if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1429             != AV_CODEC_FLAG_PASS1)
/* Overhead = bytes the container added on top of the raw coded payload. */
1433     if (data_size && total_size>0 && total_size >= data_size)
1434         percent = 100.0 * (total_size - data_size) / data_size;
1436     av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1437            video_size / 1024.0,
1438            audio_size / 1024.0,
1439            subtitle_size / 1024.0,
1440            other_size / 1024.0,
1441            extra_size / 1024.0);
1443         av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1445         av_log(NULL, AV_LOG_INFO, "unknown");
1446     av_log(NULL, AV_LOG_INFO, "\n");
1448     /* print verbose per-stream stats */
1449     for (i = 0; i < nb_input_files; i++) {
1450         InputFile *f = input_files[i];
1451         uint64_t total_packets = 0, total_size = 0;
1453         av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1454                i, f->ctx->filename);
1456         for (j = 0; j < f->nb_streams; j++) {
1457             InputStream *ist = input_streams[f->ist_index + j];
1458             enum AVMediaType type = ist->dec_ctx->codec_type;
1460             total_size    += ist->data_size;
1461             total_packets += ist->nb_packets;
1463             av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
1464                    i, j, media_type_string(type));
1465             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1466                    ist->nb_packets, ist->data_size);
1468             if (ist->decoding_needed) {
1469                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1470                        ist->frames_decoded);
1471                 if (type == AVMEDIA_TYPE_AUDIO)
1472                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1473                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1476             av_log(NULL, AV_LOG_VERBOSE, "\n");
1479         av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1480                total_packets, total_size);
/* Symmetric per-output-file report: packets muxed and frames encoded. */
1483     for (i = 0; i < nb_output_files; i++) {
1484         OutputFile *of = output_files[i];
1485         uint64_t total_packets = 0, total_size = 0;
1487         av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1488                i, of->ctx->filename);
1490         for (j = 0; j < of->ctx->nb_streams; j++) {
1491             OutputStream *ost = output_streams[of->ost_index + j];
1492             enum AVMediaType type = ost->enc_ctx->codec_type;
1494             total_size    += ost->data_size;
1495             total_packets += ost->packets_written;
1497             av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
1498                    i, j, media_type_string(type));
1499             if (ost->encoding_needed) {
1500                 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1501                        ost->frames_encoded);
1502                 if (type == AVMEDIA_TYPE_AUDIO)
1503                     av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1504                 av_log(NULL, AV_LOG_VERBOSE, "; ");
1507             av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1508                    ost->packets_written, ost->data_size);
1510             av_log(NULL, AV_LOG_VERBOSE, "\n");
1513         av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1514                total_packets, total_size);
1516     if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1517         av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1519             av_log(NULL, AV_LOG_WARNING, "\n");
1521             av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Emit the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * stderr/log and, when -progress is in use, the machine-readable key=value
 * report to progress_avio.  Rate-limited to one update per 500ms unless this
 * is the final report, in which case print_final_stats() is also called.
 * Uses static locals (last_time, qp_histogram), so it is not re-entrant. */
1526 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1529     AVBPrint buf_script;
1531     AVFormatContext *oc;
1533     AVCodecContext *enc;
1534     int frame_number, vid, i;
1537     int64_t pts = INT64_MIN + 1;
1538     static int64_t last_time = -1;
1539     static int qp_histogram[52];
1540     int hours, mins, secs, us;
1543     if (!print_stats && !is_last_report && !progress_avio)
/* Throttle: skip intermediate reports issued less than 500ms apart. */
1546     if (!is_last_report) {
1547         if (last_time == -1) {
1548             last_time = cur_time;
1551         if ((cur_time - last_time) < 500000)
1553         last_time = cur_time;
1556     t = (cur_time-timer_start) / 1000000.0;
1559     oc = output_files[0]->ctx;
1561     total_size = avio_size(oc->pb);
1562     if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1563         total_size = avio_tell(oc->pb);
1567     av_bprint_init(&buf_script, 0, 1);
1568     for (i = 0; i < nb_output_streams; i++) {
1570         ost = output_streams[i];
1572         if (!ost->stream_copy)
1573             q = ost->quality / (float) FF_QP2LAMBDA;
1575         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1576             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1577             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1578                        ost->file_index, ost->index, q);
/* Only the first video stream drives the frame/fps fields (vid flag). */
1580         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1583             frame_number = ost->frame_number;
1584             fps = t > 1 ? frame_number / t : 0;
1585             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1586                      frame_number, fps < 9.95, fps, q);
1587             av_bprintf(&buf_script, "frame=%d\n", frame_number);
1588             av_bprintf(&buf_script, "fps=%.1f\n", fps);
1589             av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1590                        ost->file_index, ost->index, q);
1592                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-scaled per-QP counts printed as a hex digit string. */
1596                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1598                 for (j = 0; j < 32; j++)
1599                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1602             if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1604                 double error, error_sum = 0;
1605                 double scale, scale_sum = 0;
1607                 char type[3] = { 'Y','U','V' };
1608                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1609                 for (j = 0; j < 3; j++) {
/* The final report uses cumulative encoder error over all frames;
 * intermediate reports use the last frame's error only. */
1610                     if (is_last_report) {
1611                         error = enc->error[j];
1612                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1614                         error = ost->error[j];
1615                         scale = enc->width * enc->height * 255.0 * 255.0;
1621                     p = psnr(error / scale);
1622                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1623                     av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1624                                ost->file_index, ost->index, type[j] | 32, p);
1626                 p = psnr(error_sum / scale_sum);
1627                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1628                 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1629                            ost->file_index, ost->index, p);
1633         /* compute min output value */
1634         if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1635             pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1636                                           ost->st->time_base, AV_TIME_BASE_Q));
1638             nb_frames_drop += ost->last_dropped;
/* Break |pts| (in AV_TIME_BASE units) into h:m:s + microsecond remainder. */
1641     secs = FFABS(pts) / AV_TIME_BASE;
1642     us = FFABS(pts) % AV_TIME_BASE;
1648     bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1649     speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1651     if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1653     else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1654                                  "size=%8.0fkB time=", total_size / 1024.0);
1656         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1657     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1658              "%02d:%02d:%02d.%02d ", hours, mins, secs,
1659              (100 * us) / AV_TIME_BASE);
1662         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1663         av_bprintf(&buf_script, "bitrate=N/A\n");
1665         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1666         av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1669     if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1670     else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1671     av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1672     av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1673                hours, mins, secs, us);
1675     if (nb_frames_dup || nb_frames_drop)
1676         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1677                 nb_frames_dup, nb_frames_drop);
1678     av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1679     av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1682         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1683         av_bprintf(&buf_script, "speed=N/A\n");
1685         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1686         av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps intermediate reports on one console line; '\n' ends the run. */
1689     if (print_stats || is_last_report) {
1690         const char end = is_last_report ? '\n' : '\r';
1691         if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1692             fprintf(stderr, "%s    %c", buf, end);
1694             av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);
1699     if (progress_avio) {
1700         av_bprintf(&buf_script, "progress=%s\n",
1701                    is_last_report ? "end" : "continue");
1702         avio_write(progress_avio, buf_script.str,
1703                    FFMIN(buf_script.len, buf_script.size - 1));
1704         avio_flush(progress_avio);
1705         av_bprint_finalize(&buf_script, NULL);
1706         if (is_last_report) {
1707             avio_closep(&progress_avio);
1712         print_final_stats(total_size);
/* At end of input, drain every encoder that still has delayed packets
 * buffered: repeatedly call the encode function with a NULL frame and write
 * the resulting packets, until the encoder stops producing output.
 * Streams that are stream-copied, raw-picture video, or "raw" audio
 * (frame_size <= 1) need no flushing and are skipped. */
1715 static void flush_encoders(void)
1719     for (i = 0; i < nb_output_streams; i++) {
1720         OutputStream   *ost = output_streams[i];
1721         AVCodecContext *enc = ost->enc_ctx;
1722         AVFormatContext *os = output_files[ost->file_index]->ctx;
1723         int stop_encoding = 0;
1725         if (!ost->encoding_needed)
1728         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1730 #if FF_API_LAVF_FMT_RAWPICTURE
1731         if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Select the type-specific encode entry point (audio or video). */
1736             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1739             switch (enc->codec_type) {
1740             case AVMEDIA_TYPE_AUDIO:
1741                 encode = avcodec_encode_audio2;
1744             case AVMEDIA_TYPE_VIDEO:
1745                 encode = avcodec_encode_video2;
1756                 av_init_packet(&pkt);
1760                 update_benchmark(NULL);
/* NULL frame signals the encoder to emit its remaining delayed packets. */
1761                 ret = encode(enc, &pkt, NULL, &got_packet);
1762                 update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1764                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1769                 if (ost->logfile && enc->stats_out) {
1770                     fprintf(ost->logfile, "%s", enc->stats_out);
1776                 if (ost->finished & MUXER_FINISHED) {
1777                     av_packet_unref(&pkt);
1780                 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1781                 pkt_size = pkt.size;
1782                 write_frame(os, &pkt, ost);
1783                 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1784                     do_video_stats(ost, pkt_size);
1795  * Check whether a packet from ist should be written into ost at this time
/* Returns nonzero when ost is fed by this exact input stream and the packet
 * is not before the output file's requested start time.
 * NOTE(review): additional checks from the original function are elided in
 * this extract (see numbering gaps). */
1797 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1799     OutputFile *of = output_files[ost->file_index];
1800     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1802     if (ost->source_index != ist_index)
1808     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * applies -ss/-t window checks, rescales pts/dts/duration from the input
 * stream timebase to the output stream timebase, optionally runs the
 * bitstream parser for header conversion, and writes the packet. */
1814 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1816     OutputFile *of = output_files[ost->file_index];
1817     InputFile   *f = input_files [ist->file_index];
1818     int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1819     int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1823     av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1825     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1826         !ost->copy_initial_nonkeyframes)
/* Drop packets that lie before the effective start of the output. */
1829     if (!ost->frame_number && !ost->copy_prior_start) {
1830         int64_t comp_start = start_time;
1831         if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1832             comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1833         if (pkt->pts == AV_NOPTS_VALUE ?
1834             ist->pts < comp_start :
1835             pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop the stream once the output recording window (-t) is exceeded. */
1839     if (of->recording_time != INT64_MAX &&
1840         ist->pts >= of->recording_time + start_time) {
1841         close_output_stream(ost);
1845     if (f->recording_time != INT64_MAX) {
1846         start_time = f->ctx->start_time;
1847         if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1848             start_time += f->start_time;
1849         if (ist->pts >= f->recording_time + start_time) {
1850             close_output_stream(ost);
1855     /* force the input stream PTS */
1856     if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1859     if (pkt->pts != AV_NOPTS_VALUE)
1860         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1862         opkt.pts = AV_NOPTS_VALUE;
/* Fall back to the tracked input dts when the packet carries none. */
1864     if (pkt->dts == AV_NOPTS_VALUE)
1865         opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1867         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1868     opkt.dts -= ost_tb_start_time;
/* Audio: derive sample-accurate timestamps via av_rescale_delta() so
 * rounding errors do not accumulate across packets. */
1870     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1871         int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1873             duration = ist->dec_ctx->frame_size;
1874         opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1875                                                (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1876                                                ost->st->time_base) - ost_tb_start_time;
1879     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1880     opkt.flags    = pkt->flags;
1881     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1882     if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
1883        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1884        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1885        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1887         int ret = av_parser_change(ost->parser, ost->st->codec,
1888                                &opkt.data, &opkt.size,
1889                                pkt->data, pkt->size,
1890                                pkt->flags & AV_PKT_FLAG_KEY);
1892             av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* The parser may have allocated a new buffer; wrap it so it gets freed. */
1897             opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1902         opkt.data = pkt->data;
1903         opkt.size = pkt->size;
1905     av_copy_packet_side_data(&opkt, pkt);
1907 #if FF_API_LAVF_FMT_RAWPICTURE
1908     if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1909         ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1910         (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1911         /* store AVPicture in AVPacket, as expected by the output format */
1912         int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1914             av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1918         opkt.data = (uint8_t *)&pict;
1919         opkt.size = sizeof(AVPicture);
1920         opkt.flags |= AV_PKT_FLAG_KEY;
1924     write_frame(of->ctx, &opkt, ost);
/* If the decoder did not report a channel layout, pick the default layout
 * for its channel count (unless the count exceeds -guess_layout_max) and
 * warn the user about the guess.  Returns nonzero on success (layout set or
 * already present); the failure path is elided in this extract. */
1927 int guess_input_channel_layout(InputStream *ist)
1929     AVCodecContext *dec = ist->dec_ctx;
1931     if (!dec->channel_layout) {
1932         char layout_name[256];
1934         if (dec->channels > ist->guess_layout_max)
1936         dec->channel_layout = av_get_default_channel_layout(dec->channels);
1937         if (!dec->channel_layout)
1939         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1940                                      dec->channels, dec->channel_layout);
1941         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1942                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Book-keeping after a decode call: count successes/failures in
 * decode_error_stat[], and with -xerror abort on decode errors or on
 * frames the decoder flagged as corrupt. 'ist' may be NULL (subtitles). */
1947 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1949     if (*got_output || ret<0)
1950         decode_error_stat[ret<0] ++;
1952     if (ret < 0 && exit_on_error)
1955     if (exit_on_error && *got_output && ist) {
1956         if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1957             av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, fix up timestamps (preferring decoder pts, then
 * pkt_pts, then the packet, then the tracked dts), reconfigure filtergraphs
 * if the sample format/rate/channels changed mid-stream, and push the frame
 * into every filter this input feeds.  Returns the decode return value or a
 * negative error from the filter push. */
1963 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1965     AVFrame *decoded_frame, *f;
1966     AVCodecContext *avctx = ist->dec_ctx;
1967     int i, ret, err = 0, resample_changed;
1968     AVRational decoded_frame_tb;
/* Lazily allocate the reusable decode/filter frames. */
1970     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1971         return AVERROR(ENOMEM);
1972     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1973         return AVERROR(ENOMEM);
1974     decoded_frame = ist->decoded_frame;
1976     update_benchmark(NULL);
1977     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1978     update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1980     if (ret >= 0 && avctx->sample_rate <= 0) {
1981         av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1982         ret = AVERROR_INVALIDDATA;
1985     check_decode_result(ist, got_output, ret);
1987     if (!*got_output || ret < 0)
1990     ist->samples_decoded += decoded_frame->nb_samples;
1991     ist->frames_decoded++;
1994     /* increment next_dts to use for the case where the input stream does not
1995        have timestamps or there are multiple frames in the packet */
1996     ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1998     ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect mid-stream parameter changes that require rebuilding the filters. */
2002     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
2003                        ist->resample_channels       != avctx->channels               ||
2004                        ist->resample_channel_layout != decoded_frame->channel_layout ||
2005                        ist->resample_sample_rate    != decoded_frame->sample_rate;
2006     if (resample_changed) {
2007         char layout1[64], layout2[64];
2009             if (!guess_input_channel_layout(ist)) {
2010                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2011                        "layout for Input Stream #%d.%d\n", ist->file_index,
2015             decoded_frame->channel_layout = avctx->channel_layout;
2017         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2018                                      ist->resample_channel_layout);
2019         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2020                                      decoded_frame->channel_layout);
2022         av_log(NULL, AV_LOG_INFO,
2023                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2024                ist->file_index, ist->st->index,
2025                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
2026                ist->resample_channels, layout1,
2027                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2028                avctx->channels, layout2);
2030         ist->resample_sample_fmt     = decoded_frame->format;
2031         ist->resample_sample_rate    = decoded_frame->sample_rate;
2032         ist->resample_channel_layout = decoded_frame->channel_layout;
2033         ist->resample_channels       = avctx->channels;
2035         for (i = 0; i < nb_filtergraphs; i++)
2036             if (ist_in_filtergraph(filtergraphs[i], ist)) {
2037                 FilterGraph *fg = filtergraphs[i];
2038                 if (configure_filtergraph(fg) < 0) {
2039                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2045     /* if the decoder provides a pts, use it instead of the last packet pts.
2046        the decoder could be delaying output by a packet or more. */
2047     if (decoded_frame->pts != AV_NOPTS_VALUE) {
2048         ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2049         decoded_frame_tb   = avctx->time_base;
2050     } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2051         decoded_frame->pts = decoded_frame->pkt_pts;
2052         decoded_frame_tb   = ist->st->time_base;
2053     } else if (pkt->pts != AV_NOPTS_VALUE) {
2054         decoded_frame->pts = pkt->pts;
2055         decoded_frame_tb   = ist->st->time_base;
2057         decoded_frame->pts = ist->dts;
2058         decoded_frame_tb   = AV_TIME_BASE_Q;
/* Consume the packet pts so a multi-frame packet does not reuse it. */
2060     pkt->pts           = AV_NOPTS_VALUE;
2061     if (decoded_frame->pts != AV_NOPTS_VALUE)
2062         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2063                                               (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2064                                               (AVRational){1, avctx->sample_rate});
2065     ist->nb_samples = decoded_frame->nb_samples;
/* Feed every attached filter; ref for all but the last to avoid a copy. */
2066     for (i = 0; i < ist->nb_filters; i++) {
2067         if (i < ist->nb_filters - 1) {
2068             f = ist->filter_frame;
2069             err = av_frame_ref(f, decoded_frame);
2074         err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2075                                            AV_BUFFERSRC_FLAG_PUSH);
2076         if (err == AVERROR_EOF)
2077             err = 0; /* ignore */
2081     decoded_frame->pts = AV_NOPTS_VALUE;
2083     av_frame_unref(ist->filter_frame);
2084     av_frame_unref(decoded_frame);
2085     return err < 0 ? err : ret;
/* Decode one video packet, derive the best-effort timestamp, retrieve data
 * from a hardware accelerator if used, rebuild filtergraphs on resolution /
 * pixel-format changes, and push the frame into every attached filter.
 * Returns the decode return value or a negative error. */
2088 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2090     AVFrame *decoded_frame, *f;
2091     int i, ret = 0, err = 0, resample_changed;
2092     int64_t best_effort_timestamp;
2093     AVRational *frame_sample_aspect;
2095     if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2096         return AVERROR(ENOMEM);
2097     if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2098         return AVERROR(ENOMEM);
/* Feed the tracked dts to the decoder via the packet (helps pts-less streams). */
2100     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2102     update_benchmark(NULL);
2103     ret = avcodec_decode_video2(ist->dec_ctx,
2104                                 decoded_frame, got_output, pkt);
2105     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2107     // The following line may be required in some cases where there is no parser
2108     // or the parser does not has_b_frames correctly
2109     if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2110         if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2111             ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2113             av_log(ist->dec_ctx, AV_LOG_WARNING,
2114                    "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2115                    "If you want to help, upload a sample "
2116                    "of this file to ftp://upload.ffmpeg.org/incoming/ "
2117                    "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2118                    ist->dec_ctx->has_b_frames,
2119                    ist->st->codec->has_b_frames);
2122     check_decode_result(ist, got_output, ret);
/* Sanity-check that the decoded frame matches the context's parameters. */
2124     if (*got_output && ret >= 0) {
2125         if (ist->dec_ctx->width  != decoded_frame->width ||
2126             ist->dec_ctx->height != decoded_frame->height ||
2127             ist->dec_ctx->pix_fmt != decoded_frame->format) {
2128             av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2129                 decoded_frame->width,
2130                 decoded_frame->height,
2131                 decoded_frame->format,
2132                 ist->dec_ctx->width,
2133                 ist->dec_ctx->height,
2134                 ist->dec_ctx->pix_fmt);
2138     if (!*got_output || ret < 0)
2141     if(ist->top_field_first>=0)
2142         decoded_frame->top_field_first = ist->top_field_first;
2144     ist->frames_decoded++;
/* Copy frame data out of the hw accelerator into a software frame. */
2146     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2147         err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2151     ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2153     best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2154     if(best_effort_timestamp != AV_NOPTS_VALUE)
2155         ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2158         av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2159                "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2160                ist->st->index, av_ts2str(decoded_frame->pts),
2161                av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2162                best_effort_timestamp,
2163                av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2164                decoded_frame->key_frame, decoded_frame->pict_type,
2165                ist->st->time_base.num, ist->st->time_base.den);
2170     if (ist->st->sample_aspect_ratio.num)
2171         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Rebuild reinit-capable filtergraphs on resolution / pix_fmt changes. */
2173     resample_changed = ist->resample_width   != decoded_frame->width  ||
2174                        ist->resample_height  != decoded_frame->height ||
2175                        ist->resample_pix_fmt != decoded_frame->format;
2176     if (resample_changed) {
2177         av_log(NULL, AV_LOG_INFO,
2178                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2179                ist->file_index, ist->st->index,
2180                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2181                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2183         ist->resample_width   = decoded_frame->width;
2184         ist->resample_height  = decoded_frame->height;
2185         ist->resample_pix_fmt = decoded_frame->format;
2187         for (i = 0; i < nb_filtergraphs; i++) {
2188             if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2189                 configure_filtergraph(filtergraphs[i]) < 0) {
2190                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2196     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Feed every attached filter; ref for all but the last to avoid a copy. */
2197     for (i = 0; i < ist->nb_filters; i++) {
2198         if (!frame_sample_aspect->num)
2199             *frame_sample_aspect = ist->st->sample_aspect_ratio;
2201         if (i < ist->nb_filters - 1) {
2202             f = ist->filter_frame;
2203             err = av_frame_ref(f, decoded_frame);
2208         ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2209         if (ret == AVERROR_EOF) {
2210             ret = 0; /* ignore */
2211         } else if (ret < 0) {
2212             av_log(NULL, AV_LOG_FATAL,
2213                    "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2219     av_frame_unref(ist->filter_frame);
2220     av_frame_unref(decoded_frame);
2221     return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix overlapping display durations
 * (-fix_sub_duration works one subtitle behind, clamping the previous event
 * to end when the new one starts), update sub2video rendering, and send the
 * subtitle to every matching subtitle encoder. */
2224 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2226     AVSubtitle subtitle;
2227     int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2228                                           &subtitle, got_output, pkt);
2230     check_decode_result(NULL, got_output, ret);
2232     if (ret < 0 || !*got_output) {
2234             sub2video_flush(ist);
2238     if (ist->fix_sub_duration) {
2240         if (ist->prev_sub.got_output) {
2241             end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2242                              1000, AV_TIME_BASE);
2243             if (end < ist->prev_sub.subtitle.end_display_time) {
2244                 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2245                        "Subtitle duration reduced from %d to %d%s\n",
2246                        ist->prev_sub.subtitle.end_display_time, end,
2247                        end <= 0 ? ", dropping it" : "");
2248                 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: output the delayed (now fixed) subtitle. */
2251         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
2252         FFSWAP(int,        ret,         ist->prev_sub.ret);
2253         FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
2261     sub2video_update(ist, &subtitle);
2263     if (!subtitle.num_rects)
2266     ist->frames_decoded++;
2268     for (i = 0; i < nb_output_streams; i++) {
2269         OutputStream *ost = output_streams[i];
2271         if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2272             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2275         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2279     avsubtitle_free(&subtitle);
/* Signal EOF to every buffersrc this input stream feeds by pushing a NULL
 * frame, so the filtergraphs can flush their remaining output. */
2283 static int send_filter_eof(InputStream *ist)
2286     for (i = 0; i < ist->nb_filters; i++) {
2287         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2294 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Decode (or stream-copy) one input packet for a stream.
 * pkt == NULL means EOF: keep calling the decoders to drain buffered frames.
 * no_eof != 0 suppresses sending EOF into the filtergraphs (used when looping
 * the input: we flush the decoders but keep the filters alive).
 * Maintains ist->pts/dts and the predicted ist->next_pts/next_dts in
 * AV_TIME_BASE units. NOTE(review): elided listing — lines are missing
 * between the numbered statements. */
2295 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
     /* First packet of the stream: derive an initial DTS.  For streams with a
      * known frame rate, back-date by the decoder's B-frame delay. */
2301 if (!ist->saw_first_ts) {
2302 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2304 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2305 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2306 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2308 ist->saw_first_ts = 1;
2311 if (ist->next_dts == AV_NOPTS_VALUE)
2312 ist->next_dts = ist->dts;
2313 if (ist->next_pts == AV_NOPTS_VALUE)
2314 ist->next_pts = ist->pts;
2318 av_init_packet(&avpkt);
     /* Resync predicted timestamps from the packet's own DTS when present. */
2326 if (pkt->dts != AV_NOPTS_VALUE) {
2327 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2328 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2329 ist->next_pts = ist->pts = ist->dts;
2332 // while we have more to decode or while the decoder did output something on EOF
2333 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2337 ist->pts = ist->next_pts;
2338 ist->dts = ist->next_dts;
     /* Decoder consumed only part of the packet: warn unless the codec is
      * declared to produce sub-packet frames. */
2340 if (avpkt.size && avpkt.size != pkt->size &&
2341 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2342 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2343 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2344 ist->showed_multi_packet_warning = 1;
2347 switch (ist->dec_ctx->codec_type) {
2348 case AVMEDIA_TYPE_AUDIO:
2349 ret = decode_audio    (ist, &avpkt, &got_output);
2351 case AVMEDIA_TYPE_VIDEO:
2352 ret = decode_video    (ist, &avpkt, &got_output);
     /* Estimate the frame duration: packet duration if set, otherwise derive
      * it from the codec frame rate and repeat_pict/ticks_per_frame. */
2353 if (avpkt.duration) {
2354 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2355 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2356 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2357 duration = ((int64_t)AV_TIME_BASE *
2358 ist->dec_ctx->framerate.den * ticks) /
2359 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2363 if(ist->dts != AV_NOPTS_VALUE && duration) {
2364 ist->next_dts += duration;
2366 ist->next_dts = AV_NOPTS_VALUE;
2369 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2371 case AVMEDIA_TYPE_SUBTITLE:
2372 ret = transcode_subtitles(ist, &avpkt, &got_output);
2379 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2380 ist->file_index, ist->st->index, av_err2str(ret));
     /* Only the first slice of a packet carries a valid PTS. */
2387 avpkt.pts= AV_NOPTS_VALUE;
2389 // touch data and size only if not EOF
2391 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2399 if (got_output && !pkt)
2403 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2404 /* except when looping we need to flush but not to send an EOF */
2405 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2406 int ret = send_filter_eof(ist);
2408 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2413 /* handle stream copy: predict next DTS from codec parameters since we
 * never decode */
2414 if (!ist->decoding_needed) {
2415 ist->dts = ist->next_dts;
2416 switch (ist->dec_ctx->codec_type) {
2417 case AVMEDIA_TYPE_AUDIO:
2418 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2419 ist->dec_ctx->sample_rate;
2421 case AVMEDIA_TYPE_VIDEO:
2422 if (ist->framerate.num) {
2423 // TODO: Remove work-around for c99-to-c89 issue 7
2424 AVRational time_base_q = AV_TIME_BASE_Q;
     /* Snap next_dts to the forced-framerate grid, then advance one frame. */
2425 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2426 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2427 } else if (pkt->duration) {
2428 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2429 } else if(ist->dec_ctx->framerate.num != 0) {
2430 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2431 ist->next_dts += ((int64_t)AV_TIME_BASE *
2432 ist->dec_ctx->framerate.den * ticks) /
2433 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2437 ist->pts = ist->dts;
2438 ist->next_pts = ist->next_dts;
     /* Forward the packet to every output stream that stream-copies from us. */
2440 for (i = 0; pkt && i < nb_output_streams; i++) {
2441 OutputStream *ost = output_streams[i];
2443 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2446 do_streamcopy(ist, ost, pkt);
/* Build an SDP description covering all RTP output files and either print it
 * to stdout or, when -sdp_file was given, write it to sdp_filename.
 * NOTE(review): elided listing — allocation checks and cleanup lines are not
 * visible here. */
2452 static void print_sdp(void)
2457 AVIOContext *sdp_pb;
2458 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
     /* Collect only the muxer contexts that actually use the "rtp" format. */
2462 for (i = 0, j = 0; i < nb_output_files; i++) {
2463 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2464 avc[j] = output_files[i]->ctx;
2472 av_sdp_create(avc, j, sdp, sizeof(sdp));
2474 if (!sdp_filename) {
2475 printf("SDP:\n%s\n", sdp);
2478 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2479 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2481 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2482 avio_closep(&sdp_pb);
2483 av_freep(&sdp_filename);
/* Look up the hwaccel entry matching a hardware pixel format in the global
 * hwaccels[] table (terminated by a NULL name); presumably returns NULL on
 * a miss — the final return is not visible in this elided listing. */
2491 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2494 for (i = 0; hwaccels[i].name; i++)
2495 if (hwaccels[i].pix_fmt == pix_fmt)
2496 return &hwaccels[i];
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * formats and pick the first hardware format whose hwaccel matches the
 * user's -hwaccel selection and can be initialized.  Software formats are
 * handled outside the visible lines of this elided listing. */
2500 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2502 InputStream *ist = s->opaque;
2503 const enum AVPixelFormat *p;
2506 for (p = pix_fmts; *p != -1; p++) {
2507 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2508 const HWAccel *hwaccel;
     /* Only hardware-accelerated formats are considered here. */
2510 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2513 hwaccel = get_hwaccel(*p);
     /* Skip formats whose hwaccel conflicts with one already active or with
      * the user's explicit -hwaccel choice. */
2515 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2516 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2519 ret = hwaccel->init(s);
     /* Init failed: fatal only if the user explicitly requested this hwaccel. */
2521 if (ist->hwaccel_id == hwaccel->id) {
2522 av_log(NULL, AV_LOG_FATAL,
2523 "%s hwaccel requested for input stream #%d:%d, "
2524 "but cannot be initialized.\n", hwaccel->name,
2525 ist->file_index, ist->st->index);
2526 return AV_PIX_FMT_NONE;
2530 ist->active_hwaccel_id = hwaccel->id;
2531 ist->hwaccel_pix_fmt   = *p;
/* AVCodecContext.get_buffer2 callback: route frame allocation to the active
 * hwaccel when the frame uses its pixel format, otherwise fall back to the
 * libavcodec default allocator. */
2538 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2540 InputStream *ist = s->opaque;
2542 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2543 return ist->hwaccel_get_buffer(s, frame, flags);
2545 return avcodec_default_get_buffer2(s, frame, flags);
/* Open the decoder for one input stream (if decoding is needed) and reset
 * its predicted timestamps.  On failure, writes a human-readable message
 * into 'error' and returns a negative AVERROR code.
 * NOTE(review): elided listing — the codec NULL-check guarding the first
 * snprintf is on a line not visible here. */
2548 static int init_input_stream(int ist_index, char *error, int error_len)
2551 InputStream *ist = input_streams[ist_index];
2553 if (ist->decoding_needed) {
2554 AVCodec *codec = ist->dec;
2556 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2557 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2558 return AVERROR(EINVAL);
     /* Install our callbacks: hwaccel-aware format/buffer selection. */
2561 ist->dec_ctx->opaque                = ist;
2562 ist->dec_ctx->get_format            = get_format;
2563 ist->dec_ctx->get_buffer2           = get_buffer;
2564 ist->dec_ctx->thread_safe_callbacks = 1;
2566 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2567 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2568 (ist->decoding_needed & DECODING_FOR_OST)) {
2569 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2570 if (ist->decoding_needed & DECODING_FOR_FILTER)
2571 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
     /* Default to automatic thread count unless the user set one. */
2574 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2575 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2576 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2577 if (ret == AVERROR_EXPERIMENTAL)
2578 abort_codec_experimental(codec, 0);
2580 snprintf(error, error_len,
2581 "Error while opening decoder for input stream "
2583 ist->file_index, ist->st->index, av_err2str(ret));
     /* Any decoder option left unconsumed is a user error — abort. */
2586 assert_avoptions(ist->decoder_opts);
2589 ist->next_pts = AV_NOPTS_VALUE;
2590 ist->next_dts = AV_NOPTS_VALUE;
/* Return the input stream feeding this output stream, or (presumably) NULL
 * when the output has no direct source (e.g. fed by a complex filtergraph);
 * the fallback return is on a line not visible in this elided listing. */
2595 static InputStream *get_input_stream(OutputStream *ost)
2597 if (ost->source_index >= 0)
2598 return input_streams[ost->source_index];
/* qsort() comparator for int64_t values (overflow-safe via FFDIFFSIGN);
 * used to sort the forced-keyframe timestamp array. */
2602 static int compare_int64(const void *a, const void *b)
2604 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
/* Open the encoder for one output stream (when encoding is needed), then
 * propagate the encoder context into the muxer-visible AVStream.  On failure
 * writes a message into 'error' and returns a negative AVERROR code.
 * NOTE(review): elided listing — several intermediate lines (error returns,
 * closing braces, the non-encoding branch header) are not visible here. */
2607 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2611 if (ost->encoding_needed) {
2612 AVCodec      *codec = ost->enc;
2613 AVCodecContext *dec = NULL;
2616 if ((ist = get_input_stream(ost)))
     /* Carry the decoder's subtitle header (e.g. ASS) over to the encoder. */
2618 if (dec && dec->subtitle_header) {
2619 /* ASS code assumes this buffer is null terminated so add extra byte. */
2620 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2621 if (!ost->enc_ctx->subtitle_header)
2622 return AVERROR(ENOMEM);
2623 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2624 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2626 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2627 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
     /* Default audio bitrate of 128k when the user set none. */
2628 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2630 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2631 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2632 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2634 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2635 if (ret == AVERROR_EXPERIMENTAL)
2636 abort_codec_experimental(codec, 1);
2637 snprintf(error, error_len,
2638 "Error while opening encoder for output stream #%d:%d - "
2639 "maybe incorrect parameters such as bit_rate, rate, width or height",
2640 ost->file_index, ost->index);
     /* Fixed-frame-size audio encoders dictate the buffersink frame size. */
2643 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2644 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2645 av_buffersink_set_frame_size(ost->filter->filter,
2646 ost->enc_ctx->frame_size);
2647 assert_avoptions(ost->encoder_opts);
2648 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2649 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2650 " It takes bits/s as argument, not kbits/s\n");
     /* Mirror the opened encoder context into the (deprecated) st->codec so
      * the muxer sees the final parameters. */
2652 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2654 av_log(NULL, AV_LOG_FATAL,
2655 "Error initializing the output stream codec context.\n");
     /* Copy encoder-produced side data (e.g. CPB props) onto the stream. */
2659 if (ost->enc_ctx->nb_coded_side_data) {
2662 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2663 sizeof(*ost->st->side_data));
2664 if (!ost->st->side_data)
2665 return AVERROR(ENOMEM);
2667 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2668 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2669 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2671 sd_dst->data = av_malloc(sd_src->size);
2673 return AVERROR(ENOMEM);
2674 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2675 sd_dst->size = sd_src->size;
2676 sd_dst->type = sd_src->type;
2677 ost->st->nb_side_data++;
2681 // copy timebase while removing common factors
2682 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2683 ost->st->codec->codec= ost->enc_ctx->codec;
     /* Non-encoding (stream copy) path: apply user codec options directly. */
2685 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2687 av_log(NULL, AV_LOG_FATAL,
2688 "Error setting up codec context options.\n");
2691 // copy timebase while removing common factors
2692 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/* Parse a -force_key_frames specification (comma-separated list of times
 * and/or "chapters[+offset]" entries) into a sorted int64_t timestamp array
 * stored in ost->forced_kf_pts/forced_kf_count, in avctx->time_base units.
 * NOTE(review): elided listing — the counting of commas into n and some
 * loop-advance lines are not visible here. */
2698 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2699 AVCodecContext *avctx)
2702 int n = 1, i, size, index = 0;
2705 for (p = kf; *p; p++)
2709 pts = av_malloc_array(size, sizeof(*pts));
2711 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2716 for (i = 0; i < n; i++) {
2717 char *next = strchr(p, ',');
     /* "chapters[+offset]": force a keyframe at every chapter start. */
2722 if (!memcmp(p, "chapters", 8)) {
2724 AVFormatContext *avf = output_files[ost->file_index]->ctx;
     /* Grow the array by (nb_chapters - 1) extra slots, with overflow check. */
2727 if (avf->nb_chapters > INT_MAX - size ||
2728 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2730 av_log(NULL, AV_LOG_FATAL,
2731 "Could not allocate forced key frames array.\n");
2734 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2735 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2737 for (j = 0; j < avf->nb_chapters; j++) {
2738 AVChapter *c = avf->chapters[j];
2739 av_assert1(index < size);
2740 pts[index++] = av_rescale_q(c->start, c->time_base,
2741 avctx->time_base) + t;
     /* Plain timestamp entry. */
2746 t = parse_time_or_die("force_key_frames", p, 1);
2747 av_assert1(index < size);
2748 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2755 av_assert0(index == size);
     /* Keep the list sorted so the encoder loop can scan it linearly. */
2756 qsort(pts, size, sizeof(*pts), compare_int64);
2757 ost->forced_kf_count = size;
2758 ost->forced_kf_pts   = pts;
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the input was opened; such streams are not mapped. */
2761 static void report_new_stream(int input_index, AVPacket *pkt)
2763 InputFile *file = input_files[input_index];
2764 AVStream *st = file->ctx->streams[pkt->stream_index];
     /* Already warned for this index (or it existed at open time). */
2766 if (pkt->stream_index < file->nb_streams_warn)
2768 av_log(file->ctx, AV_LOG_WARNING,
2769 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2770 av_get_media_type_string(st->codec->codec_type),
2771 input_index, pkt->stream_index,
2772 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
     /* Remember the highest index warned about so we only warn once. */
2773 file->nb_streams_warn = pkt->stream_index + 1;
/* Set the "encoder" metadata tag on the output stream to identify libavcodec
 * and the encoder used, unless the user already set one.  In bitexact mode
 * the version string is replaced by a bare "Lavc" to keep output stable. */
2776 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2778 AVDictionaryEntry *e;
2780 uint8_t *encoder_string;
2781 int encoder_string_len;
2782 int format_flags = 0;
2783 int codec_flags = 0;
     /* Respect a user-provided encoder tag. */
2785 if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
     /* Evaluate -fflags / -flags option strings to detect +bitexact. */
2788 e = av_dict_get(of->opts, "fflags", NULL, 0);
2790 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2793 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2795 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2797 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2800 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2803 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2804 encoder_string     = av_mallocz(encoder_string_len);
2805 if (!encoder_string)
2808 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2809 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2811 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2812 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
     /* DONT_STRDUP_VAL hands ownership of encoder_string to the dict. */
2813 av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2814 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/* One-time setup before the main transcode loop: resolve filtergraph
 * source streams, compute per-output-stream encoding/copy parameters,
 * open all encoders and decoders, write output file headers, and print
 * the stream mapping.  Returns 0 on success or a negative AVERROR.
 * NOTE(review): elided listing — many intermediate lines (braces, breaks,
 * continues, error gotos) are not visible between the numbered statements. */
2817 static int transcode_init(void)
2819 int ret = 0, i, j, k;
2820 AVFormatContext *oc;
2823 char error[1024] = {0};
     /* Attach each complex-filtergraph output to the input stream that feeds
      * the graph, so downstream code has a source_index to consult. */
2826 for (i = 0; i < nb_filtergraphs; i++) {
2827 FilterGraph *fg = filtergraphs[i];
2828 for (j = 0; j < fg->nb_outputs; j++) {
2829 OutputFilter *ofilter = fg->outputs[j];
2830 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2832 if (fg->nb_inputs != 1)
2834 for (k = nb_input_streams-1; k >= 0 ; k--)
2835 if (fg->inputs[0]->ist == input_streams[k])
2837 ofilter->ost->source_index = k;
2841 /* init framerate emulation */
2842 for (i = 0; i < nb_input_files; i++) {
2843 InputFile *ifile = input_files[i];
2844 if (ifile->rate_emu)
2845 for (j = 0; j < ifile->nb_streams; j++)
2846 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2849 /* for each output stream, we compute the right encoding parameters */
2850 for (i = 0; i < nb_output_streams; i++) {
2851 AVCodecContext *enc_ctx;
2852 AVCodecContext *dec_ctx = NULL;
2853 ost = output_streams[i];
2854 oc  = output_files[ost->file_index]->ctx;
2855 ist = get_input_stream(ost);
2857 if (ost->attachment_filename)
     /* For stream copy the (deprecated) st->codec context is used directly. */
2860 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2863 dec_ctx = ist->dec_ctx;
2865 ost->st->disposition          = ist->st->disposition;
2866 enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2867 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
     /* If this is the only stream of its type in the file, mark it default. */
2869 for (j=0; j<oc->nb_streams; j++) {
2870 AVStream *st = oc->streams[j];
2871 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2874 if (j == oc->nb_streams)
2875 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2876 ost->st->disposition = AV_DISPOSITION_DEFAULT;
     /* ---- stream copy: clone codec parameters from the input ---- */
2879 if (ost->stream_copy) {
2881 uint64_t extra_size;
2883 av_assert0(ist && !ost->filter);
2885 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2887 if (extra_size > INT_MAX) {
2888 return AVERROR(EINVAL);
2891 /* if stream_copy is selected, no need to decode or encode */
2892 enc_ctx->codec_id   = dec_ctx->codec_id;
2893 enc_ctx->codec_type = dec_ctx->codec_type;
2895 if (!enc_ctx->codec_tag) {
2896 unsigned int codec_tag;
     /* Keep the input tag only when the output container cannot map the
      * codec id to its own tag. */
2897 if (!oc->oformat->codec_tag ||
2898 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2899 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2900 enc_ctx->codec_tag = dec_ctx->codec_tag;
2903 enc_ctx->bit_rate       = dec_ctx->bit_rate;
2904 enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2905 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2906 enc_ctx->field_order    = dec_ctx->field_order;
2907 if (dec_ctx->extradata_size) {
2908 enc_ctx->extradata      = av_mallocz(extra_size);
2909 if (!enc_ctx->extradata) {
2910 return AVERROR(ENOMEM);
2912 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2914 enc_ctx->extradata_size= dec_ctx->extradata_size;
2915 enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2917 enc_ctx->time_base = ist->st->time_base;
2919 * Avi is a special case here because it supports variable fps but
2920 * having the fps and timebase differe significantly adds quite some
     /* Container-specific timebase fixups for copied streams. */
2923 if(!strcmp(oc->oformat->name, "avi")) {
2924 if (   copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2925 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2926 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2927 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2929 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2930 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2931 enc_ctx->ticks_per_frame = 2;
2932 } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2933 && av_q2d(ist->st->time_base) < 1.0/500
2935 enc_ctx->time_base = dec_ctx->time_base;
2936 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2937 enc_ctx->time_base.den *= 2;
2938 enc_ctx->ticks_per_frame = 2;
2940 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2941 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2942 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2943 && strcmp(oc->oformat->name, "f4v")
2945 if(   copy_tb<0 && dec_ctx->time_base.den
2946 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2947 && av_q2d(ist->st->time_base) < 1.0/500
2949 enc_ctx->time_base = dec_ctx->time_base;
2950 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
     /* Timecode ("tmcd") tracks keep a plausible codec timebase. */
2953 if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2954 && dec_ctx->time_base.num < dec_ctx->time_base.den
2955 && dec_ctx->time_base.num > 0
2956 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2957 enc_ctx->time_base = dec_ctx->time_base;
2960 if (!ost->frame_rate.num)
2961 ost->frame_rate = ist->framerate;
2962 if(ost->frame_rate.num)
2963 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2965 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2966 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
     /* Copy per-stream side data, skipping the display matrix when the user
      * overrode rotation. */
2968 if (ist->st->nb_side_data) {
2969 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2970 sizeof(*ist->st->side_data));
2971 if (!ost->st->side_data)
2972 return AVERROR(ENOMEM);
2974 ost->st->nb_side_data = 0;
2975 for (j = 0; j < ist->st->nb_side_data; j++) {
2976 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2977 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2979 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2982 sd_dst->data = av_malloc(sd_src->size);
2984 return AVERROR(ENOMEM);
2985 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2986 sd_dst->size = sd_src->size;
2987 sd_dst->type = sd_src->type;
2988 ost->st->nb_side_data++;
2992 ost->parser = av_parser_init(enc_ctx->codec_id);
     /* Per-media-type parameter copy for stream copy. */
2994 switch (enc_ctx->codec_type) {
2995 case AVMEDIA_TYPE_AUDIO:
2996 if (audio_volume != 256) {
2997 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3000 enc_ctx->channel_layout     = dec_ctx->channel_layout;
3001 enc_ctx->sample_rate        = dec_ctx->sample_rate;
3002 enc_ctx->channels           = dec_ctx->channels;
3003 enc_ctx->frame_size         = dec_ctx->frame_size;
3004 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3005 enc_ctx->block_align        = dec_ctx->block_align;
3006 enc_ctx->initial_padding    = dec_ctx->delay;
3007 enc_ctx->profile            = dec_ctx->profile;
3008 #if FF_API_AUDIOENC_DELAY
3009 enc_ctx->delay              = dec_ctx->delay;
     /* These block_align values are bogus for MP3/AC3 — clear them. */
3011 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3012 enc_ctx->block_align= 0;
3013 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3014 enc_ctx->block_align= 0;
3016 case AVMEDIA_TYPE_VIDEO:
3017 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
3018 enc_ctx->width              = dec_ctx->width;
3019 enc_ctx->height             = dec_ctx->height;
3020 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
3021 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3023 av_mul_q(ost->frame_aspect_ratio,
3024 (AVRational){ enc_ctx->height, enc_ctx->width });
3025 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3026 "with stream copy may produce invalid files\n");
3028 else if (ist->st->sample_aspect_ratio.num)
3029 sar = ist->st->sample_aspect_ratio;
3031 sar = dec_ctx->sample_aspect_ratio;
3032 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3033 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3034 ost->st->r_frame_rate = ist->st->r_frame_rate;
3036 case AVMEDIA_TYPE_SUBTITLE:
3037 enc_ctx->width  = dec_ctx->width;
3038 enc_ctx->height = dec_ctx->height;
3040 case AVMEDIA_TYPE_UNKNOWN:
3041 case AVMEDIA_TYPE_DATA:
3042 case AVMEDIA_TYPE_ATTACHMENT:
     /* ---- encoding path: pick the encoder and configure from filters ---- */
3049 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3051 /* should only happen when a default codec is not present. */
3052 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3053 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3054 ret = AVERROR(EINVAL);
3058 set_encoder_id(output_files[ost->file_index], ost);
3061 if (qsv_transcode_init(ost))
     /* Build an implicit "simple" filtergraph for mapped audio/video. */
3066 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3067 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3069 fg = init_simple_filtergraph(ist, ost);
3070 if (configure_filtergraph(fg)) {
3071 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
     /* Determine output frame rate: user/-r, filter, input, or 25fps. */
3076 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3077 if (!ost->frame_rate.num)
3078 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3079 if (ist && !ost->frame_rate.num)
3080 ost->frame_rate = ist->framerate;
3081 if (ist && !ost->frame_rate.num)
3082 ost->frame_rate = ist->st->r_frame_rate;
3083 if (ist && !ost->frame_rate.num) {
3084 ost->frame_rate = (AVRational){25, 1};
3085 av_log(NULL, AV_LOG_WARNING,
3087 "about the input framerate is available. Falling "
3088 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3089 "if you want a different framerate.\n",
3090 ost->file_index, ost->index);
3092 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3093 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3094 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3095 ost->frame_rate = ost->enc->supported_framerates[idx];
3097 // reduce frame rate for mpeg4 to be within the spec limits
3098 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3099 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3100 ost->frame_rate.num, ost->frame_rate.den, 65535);
     /* Take final encoding parameters from the filtergraph output link. */
3104 switch (enc_ctx->codec_type) {
3105 case AVMEDIA_TYPE_AUDIO:
3106 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3107 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3108 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3109 enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3110 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3112 case AVMEDIA_TYPE_VIDEO:
3113 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3114 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3115 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3116 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3117 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3118 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3119 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
     /* Rescale forced keyframes into the encoder timebase. */
3121 for (j = 0; j < ost->forced_kf_count; j++)
3122 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3124 enc_ctx->time_base);
3126 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3127 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3128 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3129 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3130 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3131 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3132 if (!strncmp(ost->enc->name, "libx264", 7) &&
3133 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3134 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3135 av_log(NULL, AV_LOG_WARNING,
3136 "No pixel format specified, %s for H.264 encoding chosen.\n"
3137 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3138 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3139 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3140 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3141 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3142 av_log(NULL, AV_LOG_WARNING,
3143 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3144 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3145 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3146 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3148 ost->st->avg_frame_rate = ost->frame_rate;
3151 enc_ctx->width   != dec_ctx->width  ||
3152 enc_ctx->height  != dec_ctx->height ||
3153 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3154 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
     /* Parse -force_key_frames: expression, "source", or static timestamps. */
3157 if (ost->forced_keyframes) {
3158 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3159 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3160 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3162 av_log(NULL, AV_LOG_ERROR,
3163 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3166 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3167 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3168 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3169 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3171 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3172 // parse it only for static kf timings
3173 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3174 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3178 case AVMEDIA_TYPE_SUBTITLE:
3179 enc_ctx->time_base = (AVRational){1, 1000};
3180 if (!enc_ctx->width) {
3181 enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3182 enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3185 case AVMEDIA_TYPE_DATA:
     /* Evaluate the user's -disposition flags string via a local AVOption
      * table and temporary AVClass. */
3193 if (ost->disposition) {
3194 static const AVOption opts[] = {
3195 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3196 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3197 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3198 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3199 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3200 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3201 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3202 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3203 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3204 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3205 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3206 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3207 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3208 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3211 static const AVClass class = {
3213 .item_name  = av_default_item_name,
3215 .version    = LIBAVUTIL_VERSION_INT,
3217 const AVClass *pclass = &class;
3219 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3225 /* open each encoder */
3226 for (i = 0; i < nb_output_streams; i++) {
3227 ret = init_output_stream(output_streams[i], error, sizeof(error));
3232 /* init input streams */
3233 for (i = 0; i < nb_input_streams; i++)
3234 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
     /* On decoder-open failure, close the already-opened encoders. */
3235 for (i = 0; i < nb_output_streams; i++) {
3236 ost = output_streams[i];
3237 avcodec_close(ost->enc_ctx);
3242 /* discard unused programs */
3243 for (i = 0; i < nb_input_files; i++) {
3244 InputFile *ifile = input_files[i];
3245 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3246 AVProgram *p = ifile->ctx->programs[j];
3247 int discard  = AVDISCARD_ALL;
3249 for (k = 0; k < p->nb_stream_indexes; k++)
3250 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3251 discard = AVDISCARD_DEFAULT;
3254 p->discard = discard;
3258 /* open files and write file headers */
3259 for (i = 0; i < nb_output_files; i++) {
3260 oc = output_files[i]->ctx;
3261 oc->interrupt_callback = int_cb;
3262 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3263 snprintf(error, sizeof(error),
3264 "Could not write header for output file #%d "
3265 "(incorrect codec parameters ?): %s",
3266 i, av_err2str(ret));
3267 ret = AVERROR(EINVAL);
3270 //         assert_avoptions(output_files[i]->opts);
3271 if (strcmp(oc->oformat->name, "rtp")) {
3277 /* dump the file output parameters - cannot be done before in case
3279 for (i = 0; i < nb_output_files; i++) {
3280 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3283 /* dump the stream mapping */
3284 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3285 for (i = 0; i < nb_input_streams; i++) {
3286 ist = input_streams[i];
3288 for (j = 0; j < ist->nb_filters; j++) {
3289 if (ist->filters[j]->graph->graph_desc) {
3290 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3291 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3292 ist->filters[j]->name);
3293 if (nb_filtergraphs > 1)
3294 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3295 av_log(NULL, AV_LOG_INFO, "\n");
3300 for (i = 0; i < nb_output_streams; i++) {
3301 ost = output_streams[i];
3303 if (ost->attachment_filename) {
3304 /* an attached file */
3305 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3306 ost->attachment_filename, ost->file_index, ost->index);
3310 if (ost->filter && ost->filter->graph->graph_desc) {
3311 /* output from a complex graph */
3312 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3313 if (nb_filtergraphs > 1)
3314 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3316 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3317 ost->index, ost->enc ? ost->enc->name : "?");
3321 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3322 input_streams[ost->source_index]->file_index,
3323 input_streams[ost->source_index]->st->index,
3326 if (ost->sync_ist != input_streams[ost->source_index])
3327 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3328 ost->sync_ist->file_index,
3329 ost->sync_ist->st->index);
3330 if (ost->stream_copy)
3331 av_log(NULL, AV_LOG_INFO, " (copy)");
3333 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3334 const AVCodec *out_codec   = ost->enc;
3335 const char *decoder_name   = "?";
3336 const char *in_codec_name  = "?";
3337 const char *encoder_name   = "?";
3338 const char *out_codec_name = "?";
3339 const AVCodecDescriptor *desc;
     /* Report "native" when the decoder/encoder name equals the codec name. */
3342 decoder_name  = in_codec->name;
3343 desc = avcodec_descriptor_get(in_codec->id);
3345 in_codec_name = desc->name;
3346 if (!strcmp(decoder_name, in_codec_name))
3347 decoder_name = "native";
3351 encoder_name   = out_codec->name;
3352 desc = avcodec_descriptor_get(out_codec->id);
3354 out_codec_name = desc->name;
3355 if (!strcmp(encoder_name, out_codec_name))
3356 encoder_name = "native";
3359 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3360 in_codec_name, decoder_name,
3361 out_codec_name, encoder_name);
3363 av_log(NULL, AV_LOG_INFO, "\n");
3367 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3371 if (sdp_filename || want_sdp) {
3375 transcode_init_done = 1;
3380 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3381 static int need_output(void)
/* A stream no longer wants output when it is marked finished, or its output
 * file already reached the -fs size limit (checked via avio_tell). */
3385 for (i = 0; i < nb_output_streams; i++) {
3386 OutputStream *ost = output_streams[i];
3387 OutputFile *of = output_files[ost->file_index];
3388 AVFormatContext *os = output_files[ost->file_index]->ctx;
3390 if (ost->finished ||
3391 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* -frames limit hit: close every stream of the whole output file, since the
 * file as a unit is done. */
3393 if (ost->frame_number >= ost->max_frames) {
3395 for (j = 0; j < of->ctx->nb_streams; j++)
3396 close_output_stream(output_streams[of->ost_index + j]);
3407  * Select the output stream to process.
3409  * @return selected output stream, or NULL if none available
3411 static OutputStream *choose_output(void)
/* Pick the non-finished output stream with the smallest current DTS
 * (rescaled from the stream time base — the target time base is on an
 * elided continuation line; presumably AV_TIME_BASE_Q).  Streams with no
 * DTS yet sort first (INT64_MIN) so they get primed immediately. */
3414 int64_t opts_min = INT64_MAX;
3415 OutputStream *ost_min = NULL;
3417 for (i = 0; i < nb_output_streams; i++) {
3418 OutputStream *ost = output_streams[i];
3419 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3420 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3422 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3423 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* A temporarily unavailable stream yields NULL so the caller backs off
 * instead of spinning on it. */
3425 if (!ost->finished && opts < opts_min) {
3427 ost_min = ost->unavailable ? NULL : ost;
/* Turn terminal echo on stdin (fd 0) on or off; used around interactive
 * command entry so typed commands are (in)visible.  tcgetattr failure
 * (e.g. stdin not a TTY) is silently ignored. */
3433 static void set_tty_echo(int on)
3437 if (tcgetattr(0, &tty) == 0) {
3438 if (on) tty.c_lflag |= ECHO;
3439 else tty.c_lflag &= ~ECHO;
3440 tcsetattr(0, TCSANOW, &tty);
/* Poll for and act on a single interactive key press.
 * Returns AVERROR_EXIT when the user asked to quit (or a signal arrived),
 * otherwise keeps transcoding.  Keys: +/- verbosity, s QP histogram,
 * h hex/packet dump cycle, c/C filter commands, d/D debug, ? help. */
3445 static int check_keyboard_interaction(int64_t cur_time)
3448 static int64_t last_time;
3449 if (received_nb_signals)
3450 return AVERROR_EXIT;
3451 /* read_key() returns 0 on EOF */
/* Rate-limit keyboard polling to once per 100ms, and never in daemon mode. */
3452 if(cur_time - last_time >= 100000 && !run_as_daemon){
3454 last_time = cur_time;
3458 return AVERROR_EXIT;
3459 if (key == '+') av_log_set_level(av_log_get_level()+10);
3460 if (key == '-') av_log_set_level(av_log_get_level()-10);
3461 if (key == 's') qp_hist ^= 1;
/* 'h' cycles dump states: off -> packet dump -> packet+payload hex dump. */
3464 do_hex_dump = do_pkt_dump = 0;
3465 } else if(do_pkt_dump){
3469 av_log_set_level(AV_LOG_DEBUG);
/* 'c': send command to the first matching filter; 'C': send/queue to all. */
3471 if (key == 'c' || key == 'C'){
3472 char buf[4096], target[64], command[256], arg[256] = {0};
3475 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one line of input, key by key, into buf. */
3478 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3483 fprintf(stderr, "\n");
/* Expect at least "<target> <time> <command>"; <argument> is optional. */
3485 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3486 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3487 target, time, command, arg);
3488 for (i = 0; i < nb_filtergraphs; i++) {
3489 FilterGraph *fg = filtergraphs[i];
/* Immediate delivery: buf is reused to receive the command's reply text. */
3492 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3493 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3494 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3495 } else if (key == 'c') {
/* NOTE(review): "Queing" is a typo for "Queuing" in this and the message
 * below; left untouched as runtime strings. */
3496 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3497 ret = AVERROR_PATCHWELCOME;
/* Deferred delivery at timestamp 'time' (a <time> of -1 per the prompt). */
3499 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3501 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3506 av_log(NULL, AV_LOG_ERROR,
3507 "Parse error, at least 3 arguments were expected, "
3508 "only %d given in string '%s'\n", n, buf);
/* 'd' cycles through codec debug modes; 'D' prompts for a numeric value. */
3511 if (key == 'd' || key == 'D'){
3514 debug = input_streams[0]->st->codec->debug<<1;
3515 if(!debug) debug = 1;
3516 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3523 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3528 fprintf(stderr, "\n");
3529 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3530 fprintf(stderr,"error parsing debug value\n");
/* Propagate the chosen debug flags to every decoder and encoder context. */
3532 for(i=0;i<nb_input_streams;i++) {
3533 input_streams[i]->st->codec->debug = debug;
3535 for(i=0;i<nb_output_streams;i++) {
3536 OutputStream *ost = output_streams[i];
3537 ost->enc_ctx->debug = debug;
3539 if(debug) av_log_set_level(AV_LOG_DEBUG);
3540 fprintf(stderr,"debug=%d\n", debug);
/* '?' (and presumably unknown keys): print the interactive help text. */
3543 fprintf(stderr, "key function\n"
3544 "? show this help\n"
3545 "+ increase verbosity\n"
3546 "- decrease verbosity\n"
3547 "c Send command to first matching filter supporting it\n"
3548 "C Send/Que command to all matching filters\n"
3549 "D cycle through available debug modes\n"
3550 "h dump packets/hex press to cycle through the 3 states\n"
3552 "s Show QP histogram\n"
/* Per-input-file demuxer thread (arg is the InputFile*): reads packets with
 * av_read_frame() and forwards them to the main thread through the file's
 * thread message queue.  Runs until read error/EOF or until the queue is
 * shut down from the receiving side. */
3559 static void *input_thread(void *arg)
/* Non-blocking sends are used for non-seekable (live) inputs, see
 * init_input_threads(). */
3562 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3567 ret = av_read_frame(f->ctx, &pkt);
3569 if (ret == AVERROR(EAGAIN)) {
/* Read error / EOF: make the receiver see the error and stop. */
3574 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3577 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking queue full: retry (elided lines presumably block/wait), and
 * warn the user that -thread_queue_size may be too small. */
3578 if (flags && ret == AVERROR(EAGAIN)) {
3580 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3581 av_log(f->ctx, AV_LOG_WARNING,
3582 "Thread message queue blocking; consider raising the "
3583 "thread_queue_size option (current value: %d)\n",
3584 f->thread_queue_size);
/* Send failed for good: drop the packet and propagate the error.
 * AVERROR_EOF here means the main thread closed the queue — not an error. */
3587 if (ret != AVERROR_EOF)
3588 av_log(f->ctx, AV_LOG_ERROR,
3589 "Unable to send packet to main thread: %s\n",
3591 av_packet_unref(&pkt);
3592 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and join every input demuxer thread, then free its queue.
 * Draining the queue first both unrefs queued packets (no leak) and
 * unblocks a sender stuck on a full queue so pthread_join can't deadlock. */
3600 static void free_input_threads(void)
3604 for (i = 0; i < nb_input_files; i++) {
3605 InputFile *f = input_files[i];
/* Skip files whose thread was never started (single-input case). */
3608 if (!f || !f->in_thread_queue)
/* Tell the sender side to stop producing... */
3610 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* ...then drain whatever is still queued. */
3611 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3612 av_packet_unref(&pkt);
3614 pthread_join(f->thread, NULL);
3616 av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than one
 * input; a single input is read directly on the main thread).
 * Returns 0 on success or a negative AVERROR code. */
3620 static int init_input_threads(void)
3624 if (nb_input_files == 1)
3627 for (i = 0; i < nb_input_files; i++) {
3628 InputFile *f = input_files[i];
/* Non-seekable inputs (live sources) — or formats other than lavfi when
 * there is no AVIO context — must not block the reader thread. */
3630 if (f->ctx->pb ? !f->ctx->pb->seekable :
3631 strcmp(f->ctx->iformat->name, "lavfi"))
3632 f->non_blocking = 1;
3633 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3634 f->thread_queue_size, sizeof(AVPacket));
/* pthread_create returns a positive errno value, not -errno. */
3638 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3639 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3640 av_thread_message_queue_free(&f->in_thread_queue);
3641 return AVERROR(ret);
/* Multi-input variant of get_input_packet(): receive the next packet from
 * the file's demuxer-thread queue (non-blocking when the condition on the
 * elided line — presumably f->non_blocking — is set). */
3647 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3649 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3651 AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next demuxed packet for input file f.
 * First enforces -re style rate limiting: if any stream of the file is
 * ahead of wall-clock time, report EAGAIN instead of reading.  With
 * multiple inputs the packet comes from the demuxer thread's queue. */
3655 static int get_input_packet(InputFile *f, AVPacket *pkt)
3659 for (i = 0; i < f->nb_streams; i++) {
3660 InputStream *ist = input_streams[f->ist_index + i];
/* Stream position in microseconds vs. elapsed wall-clock time. */
3661 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3662 int64_t now = av_gettime_relative() - ist->start;
3664 return AVERROR(EAGAIN);
3669 if (nb_input_files > 1)
3670 return get_input_packet_mt(f, pkt);
3672 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step hit EAGAIN somewhere. */
3675 static int got_eagain(void)
3678 for (i = 0; i < nb_output_streams; i++)
3679 if (output_streams[i]->unavailable)
/* Clear all EAGAIN state: per-input-file eagain flags and per-output-stream
 * unavailable flags, so the next scheduling pass starts fresh. */
3684 static void reset_eagain(void)
3687 for (i = 0; i < nb_input_files; i++)
3688 input_files[i]->eagain = 0;
3689 for (i = 0; i < nb_output_streams; i++)
3690 output_streams[i]->unavailable = 0;
3693 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compare tmp (in tmp_time_base) against *duration (in time_base) using
 * av_compare_ts(); keep the larger of the two in *duration and return the
 * time base that now applies to it. */
3694 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3695 AVRational time_base)
/* Early-out path (condition elided): adopt tmp and its time base. */
3701 return tmp_time_base;
3704 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3707 return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek to start_time,
 * flush decoders, and compute the file's total duration (content duration
 * plus the last frame's length) so looped timestamps can be offset in
 * process_input(). */
3713 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3716 AVCodecContext *avctx;
3717 int i, ret, has_audio = 0;
3718 int64_t duration = 0;
3720 ret = av_seek_frame(is, -1, is->start_time, 0);
/* Drain each active decoder (NULL packet + no_eof=1) and reset its state
 * before decoding restarts from the beginning. */
3724 for (i = 0; i < ifile->nb_streams; i++) {
3725 ist = input_streams[ifile->ist_index + i];
3726 avctx = ist->dec_ctx;
3729 if (ist->decoding_needed) {
3730 process_input_packet(ist, NULL, 1);
3731 avcodec_flush_buffers(avctx);
3734 /* duration is the length of the last frame in a stream
3735 * when audio stream is present we don't care about
3736 * last video frame length because it's not defined exactly */
3737 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: derive the last-frame duration per stream. */
3741 for (i = 0; i < ifile->nb_streams; i++) {
3742 ist = input_streams[ifile->ist_index + i];
3743 avctx = ist->dec_ctx;
/* Audio: exact — last frame length is nb_samples / sample_rate. */
3746 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3747 AVRational sample_rate = {1, avctx->sample_rate};
3749 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video: approximate via -r / average frame rate, else one tick. */
3753 if (ist->framerate.num) {
3754 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3755 } else if (ist->st->avg_frame_rate.num) {
3756 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3757 } else duration = 1;
3759 if (!ifile->duration)
3760 ifile->time_base = ist->st->time_base;
3761 /* the total duration of the stream, max_pts - min_pts is
3762 * the duration of the stream without the last frame */
3763 duration += ist->max_pts - ist->min_pts;
/* Keep the longest per-stream duration as the file's loop duration. */
3764 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Finite loop count (-stream_loop N > 0): decrement remaining loops. */
3768 if (ifile->loop > 0)
3776  * - 0 -- one packet was read and processed
3777  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3778  * this function should be called again
3779  * - AVERROR_EOF -- this function should not be called again
/* Read one packet from input file `file_index`, fix up its timestamps
 * (start-time offset, wrap correction, ts_scale, loop offset, discontinuity
 * handling) and hand it to process_input_packet(). */
3781 static int process_input(int file_index)
3783 InputFile *ifile = input_files[file_index];
3784 AVFormatContext *is;
3792 ret = get_input_packet(ifile, &pkt);
3794 if (ret == AVERROR(EAGAIN)) {
/* -stream_loop: on EOF (or other read failure with looping on), rewind and
 * retry the read once. */
3798 if (ret < 0 && ifile->loop) {
3799 if ((ret = seek_to_start(ifile, is)) < 0)
3801 ret = get_input_packet(ifile, &pkt);
3804 if (ret != AVERROR_EOF) {
3805 print_error(is->filename, ret);
/* True EOF: flush every decoder of this file... */
3810 for (i = 0; i < ifile->nb_streams; i++) {
3811 ist = input_streams[ifile->ist_index + i];
3812 if (ist->decoding_needed) {
3813 ret = process_input_packet(ist, NULL, 0);
3818 /* mark all outputs that don't go through lavfi as finished */
3819 for (j = 0; j < nb_output_streams; j++) {
3820 OutputStream *ost = output_streams[j];
3822 if (ost->source_index == ifile->ist_index + i &&
3823 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3824 finish_output_stream(ost);
/* EOF is reported as EAGAIN so the caller's scheduler moves on to the
 * remaining inputs instead of aborting the loop. */
3828 ifile->eof_reached = 1;
3829 return AVERROR(EAGAIN);
/* Optional packet dump (interactive 'h' key / -dump options). */
3835 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3836 is->streams[pkt.stream_index]);
3838 /* the following test is needed in case new streams appear
3839 dynamically in stream : we ignore them */
3840 if (pkt.stream_index >= ifile->nb_streams) {
3841 report_new_stream(file_index, &pkt);
3842 goto discard_packet;
3845 ist = input_streams[ifile->ist_index + pkt.stream_index];
3847 ist->data_size += pkt.size;
3851 goto discard_packet;
3853 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3854 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
/* Verbose tracing of raw demuxer timestamps before any adjustment. */
3859 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3860 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3861 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3862 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3863 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3864 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3865 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3866 av_ts2str(input_files[ist->file_index]->ts_offset),
3867 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp-wrap correction for streams with < 64 pts_wrap_bits. */
3870 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3871 int64_t stime, stime2;
3872 // Correcting starttime based on the enabled streams
3873 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3874 // so we instead do it here as part of discontinuity handling
3875 if ( ist->next_dts == AV_NOPTS_VALUE
3876 && ifile->ts_offset == -is->start_time
3877 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3878 int64_t new_start_time = INT64_MAX;
/* Recompute start time over the streams that are actually used. */
3879 for (i=0; i<is->nb_streams; i++) {
3880 AVStream *st = is->streams[i];
3881 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3883 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3885 if (new_start_time > is->start_time) {
3886 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3887 ifile->ts_offset = -new_start_time;
/* A ts more than half the wrap period past start is assumed pre-wrap;
 * unwrap it by subtracting one full wrap period. */
3891 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3892 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3893 ist->wrap_correction_done = 1;
3895 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3896 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3897 ist->wrap_correction_done = 0;
3899 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3900 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3901 ist->wrap_correction_done = 0;
3905 /* add the stream-global side data to the first packet */
3906 if (ist->nb_packets == 1) {
3907 if (ist->st->nb_side_data)
3908 av_packet_split_side_data(&pkt);
3909 for (i = 0; i < ist->st->nb_side_data; i++) {
3910 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Don't duplicate side data the packet already carries. */
3913 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* Display matrix is consumed by autorotation instead of being copied. */
3915 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3918 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3922 memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the input file's accumulated timestamp offset (-itsoffset plus any
 * discontinuity corrections), then the per-stream -itsscale factor. */
3926 if (pkt.dts != AV_NOPTS_VALUE)
3927 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3928 if (pkt.pts != AV_NOPTS_VALUE)
3929 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3931 if (pkt.pts != AV_NOPTS_VALUE)
3932 pkt.pts *= ist->ts_scale;
3933 if (pkt.dts != AV_NOPTS_VALUE)
3934 pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: compare this packet's DTS against the
 * file's last seen timestamp (only meaningful for AVFMT_TS_DISCONT formats
 * and before the stream has its own next_dts reference). */
3936 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3937 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3938 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3939 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3940 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3941 int64_t delta = pkt_dts - ifile->last_ts;
3942 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3943 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3944 ifile->ts_offset -= delta;
3945 av_log(NULL, AV_LOG_DEBUG,
3946 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3947 delta, ifile->ts_offset);
3948 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3949 if (pkt.pts != AV_NOPTS_VALUE)
3950 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* -stream_loop: shift timestamps by the accumulated file duration and track
 * min/max pts for the next loop's duration computation. */
3954 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3955 if (pkt.pts != AV_NOPTS_VALUE) {
3956 pkt.pts += duration;
3957 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3958 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3961 if (pkt.dts != AV_NOPTS_VALUE)
3962 pkt.dts += duration;
/* Intra-stream discontinuity check against the predicted next_dts. */
3964 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3965 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3966 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3967 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3969 int64_t delta = pkt_dts - ist->next_dts;
/* Formats with timestamp discontinuities (e.g. MPEG-TS): fold the jump into
 * ts_offset so downstream timestamps stay continuous. */
3970 if (is->iformat->flags & AVFMT_TS_DISCONT) {
3971 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3972 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3973 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3974 ifile->ts_offset -= delta;
3975 av_log(NULL, AV_LOG_DEBUG,
3976 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3977 delta, ifile->ts_offset);
3978 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3979 if (pkt.pts != AV_NOPTS_VALUE)
3980 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Other formats: out-of-range timestamps are dropped, not corrected. */
3983 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3984 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3985 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3986 pkt.dts = AV_NOPTS_VALUE;
3988 if (pkt.pts != AV_NOPTS_VALUE){
3989 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3990 delta = pkt_pts - ist->next_dts;
3991 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3992 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3993 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3994 pkt.pts = AV_NOPTS_VALUE;
/* Remember the file's last timestamp for the inter-stream check above. */
4000 if (pkt.dts != AV_NOPTS_VALUE)
4001 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
/* Trace the final, adjusted timestamps handed to the decoder path. */
4004 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4005 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4006 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4007 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4008 av_ts2str(input_files[ist->file_index]->ts_offset),
4009 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep subtitle-to-video rendering in sync with this stream's clock. */
4012 sub2video_heartbeat(ist, pkt.pts);
4014 process_input_packet(ist, &pkt, 0);
4017 av_packet_unref(&pkt);
4023  * Perform a step of transcoding for the specified filter graph.
4025  * @param[in] graph filter graph to consider
4026  * @param[out] best_ist input stream where a frame would allow to continue
4027  * @return 0 for success, <0 for error
4029 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4032 int nb_requests, nb_requests_max = 0;
4033 InputFilter *ifilter;
/* Ask the graph to produce output from its oldest sink first. */
4037 ret = avfilter_graph_request_oldest(graph->graph);
4039 return reap_filters(0);
/* Graph fully flushed: drain remaining frames and close its outputs. */
4041 if (ret == AVERROR_EOF) {
4042 ret = reap_filters(1);
4043 for (i = 0; i < graph->nb_outputs; i++)
4044 close_output_stream(graph->outputs[i]->ost);
4047 if (ret != AVERROR(EAGAIN))
/* EAGAIN: the graph needs more input.  Pick the input stream whose
 * buffersrc has failed the most requests — feeding it is most likely to
 * unblock the graph.  Inputs whose file is stalled or at EOF are skipped. */
4050 for (i = 0; i < graph->nb_inputs; i++) {
4051 ifilter = graph->inputs[i];
4053 if (input_files[ist->file_index]->eagain ||
4054 input_files[ist->file_index]->eof_reached)
4056 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4057 if (nb_requests > nb_requests_max) {
4058 nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable so the
 * scheduler does not keep selecting them. */
4064 for (i = 0; i < graph->nb_outputs; i++)
4065 graph->outputs[i]->ost->unavailable = 1;
4071  * Run a single step of transcoding.
4073  * @return 0 for success, <0 for error
4075 static int transcode_step(void)
/* Choose which output stream to advance; NULL means nothing is currently
 * schedulable (handled on elided lines). */
4081 ost = choose_output();
4088 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed output: let the graph decide which input stream to read. */
4093 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Direct mapping: read from the output's source input stream. */
4098 av_assert0(ost->source_index >= 0);
4099 ist = input_streams[ost->source_index];
4102 ret = process_input(ist->file_index);
/* EAGAIN from the input: mark this output unavailable if the whole file is
 * stalled, so choose_output() tries something else next time. */
4103 if (ret == AVERROR(EAGAIN)) {
4104 if (input_files[ist->file_index]->eagain)
4105 ost->unavailable = 1;
/* EOF from the input is not an error for the step as a whole. */
4110 return ret == AVERROR_EOF ? 0 : ret;
/* Pull any frames the filters can already deliver. */
4112 return reap_filters(0);
4116  * The following code is the main loop of the file converter
/* Top-level driver: initialize, loop over transcode_step() until no output
 * is wanted or a quit is requested, then flush decoders, write trailers,
 * release encoder/decoder resources and report statistics. */
4118 static int transcode(void)
4121 AVFormatContext *os;
4124 int64_t timer_start;
4125 int64_t total_packets_written = 0;
4127 ret = transcode_init();
4131 if (stdin_interaction) {
4132 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4135 timer_start = av_gettime_relative();
/* Demuxer threads are only created for multi-input runs (see the guard in
 * init_input_threads()); this call is likely compiled in conditionally. */
4138 if ((ret = init_input_threads()) < 0)
/* Main loop: exits on SIGTERM/SIGINT, 'q', or when need_output() says all
 * output streams are done. */
4142 while (!received_sigterm) {
4143 int64_t cur_time= av_gettime_relative();
4145 /* if 'q' pressed, exits */
4146 if (stdin_interaction)
4147 if (check_keyboard_interaction(cur_time) < 0)
4150 /* check if there's any stream where output is still needed */
4151 if (!need_output()) {
4152 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4156 ret = transcode_step();
/* EOF/EAGAIN from a step are recoverable scheduling outcomes, not errors. */
4158 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
4162 av_strerror(ret, errbuf, sizeof(errbuf));
4164 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4169 /* dump report by using the output first video and audio streams */
4170 print_report(0, timer_start, cur_time);
4173 free_input_threads();
4176 /* at the end of stream, we must flush the decoder buffers */
4177 for (i = 0; i < nb_input_streams; i++) {
4178 ist = input_streams[i];
4179 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4180 process_input_packet(ist, NULL, 0);
4187 /* write the trailer if needed and close file */
4188 for (i = 0; i < nb_output_files; i++) {
4189 os = output_files[i]->ctx;
4190 if ((ret = av_write_trailer(os)) < 0) {
4191 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4197 /* dump report by using the first video and audio streams */
4198 print_report(1, timer_start, av_gettime_relative());
4200 /* close each encoder */
4201 for (i = 0; i < nb_output_streams; i++) {
4202 ost = output_streams[i];
4203 if (ost->encoding_needed) {
4204 av_freep(&ost->enc_ctx->stats_in);
4206 total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever written. */
4209 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4210 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4214 /* close each decoder */
4215 for (i = 0; i < nb_input_streams; i++) {
4216 ist = input_streams[i];
4217 if (ist->decoding_needed) {
4218 avcodec_close(ist->dec_ctx);
4219 if (ist->hwaccel_uninit)
4220 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (presumably reached via goto on errors as well): join any
 * remaining input threads and free per-output-stream allocations. */
4229 free_input_threads();
4232 if (output_streams) {
4233 for (i = 0; i < nb_output_streams; i++) {
4234 ost = output_streams[i];
/* Two-pass log file, if open. */
4237 fclose(ost->logfile);
4238 ost->logfile = NULL;
4240 av_freep(&ost->forced_kf_pts);
4241 av_freep(&ost->apad);
4242 av_freep(&ost->disposition);
4243 av_dict_free(&ost->encoder_opts);
4244 av_dict_free(&ost->sws_dict);
4245 av_dict_free(&ost->swr_opts);
4246 av_dict_free(&ost->resample_opts);
4247 av_dict_free(&ost->bsf_args);
/* Return this process's consumed user CPU time in microseconds, for the
 * -benchmark report.  Uses getrusage() on POSIX, GetProcessTimes() on
 * Windows (100ns FILETIME units divided by 10), and falls back to
 * wall-clock time where neither is available. */
4255 static int64_t getutime(void)
4258 struct rusage rusage;
4260 getrusage(RUSAGE_SELF, &rusage);
4261 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4262 #elif HAVE_GETPROCESSTIMES
4264 FILETIME c, e, k, u;
4265 proc = GetCurrentProcess();
4266 GetProcessTimes(proc, &c, &e, &k, &u);
4267 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4269 return av_gettime_relative();
/* Return the process's peak memory usage in bytes for the -benchmark
 * report: ru_maxrss (kilobytes, hence *1024) on POSIX, peak pagefile usage
 * via GetProcessMemoryInfo() on Windows.  The no-support fallback branch is
 * outside this excerpt. */
4273 static int64_t getmaxrss(void)
4275 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4276 struct rusage rusage;
4277 getrusage(RUSAGE_SELF, &rusage);
4278 return (int64_t)rusage.ru_maxrss * 1024;
4279 #elif HAVE_GETPROCESSMEMORYINFO
4281 PROCESS_MEMORY_COUNTERS memcounters;
4282 proc = GetCurrentProcess();
4283 memcounters.cb = sizeof(memcounters);
4284 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4285 return memcounters.PeakPagefileUsage;
/* No-op av_log callback installed in "-d" (daemon) mode to suppress all
 * library logging (body is empty). */
4291 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: set up logging/signals, register all components,
 * parse the command line, run transcode(), and exit with an error code
 * derived from decode-error statistics and received signals.
 * NOTE(review): the function's final closing lines are outside this view. */
4295 int main(int argc, char **argv)
/* ffmpeg_cleanup will run on every exit_program() path. */
4300 register_exit(ffmpeg_cleanup);
4302 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4304 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4305 parse_loglevel(argc, argv, options);
/* "-d" as the first argument: daemon mode — silence library logging. */
4307 if(argc>1 && !strcmp(argv[1], "-d")){
4309 av_log_set_callback(log_callback_null);
/* Register codecs, devices, filters, (de)muxers and network support. */
4314 avcodec_register_all();
4316 avdevice_register_all();
4318 avfilter_register_all();
4320 avformat_network_init();
4322 show_banner(argc, argv, options);
4326 /* parse options and open all input/output files */
4327 ret = ffmpeg_parse_options(argc, argv);
4331 if (nb_output_files <= 0 && nb_input_files == 0) {
4333 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4337 /* file converter / grab */
4338 if (nb_output_files <= 0) {
4339 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
/* Historical check, intentionally disabled (zero inputs is allowed, e.g.
 * lavfi-only graphs). */
4343 // if (nb_input_files == 0) {
4344 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the whole transcode for the -benchmark report. */
4348 current_time = ti = getutime();
4349 if (transcode() < 0)
4351 ti = getutime() - ti;
4353 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4355 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4356 decode_error_stat[0], decode_error_stat[1]);
/* -max_error_rate: fail if the fraction of decode errors exceeds it. */
4357 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
/* 255 signals "terminated by signal" to the caller. */
4360 exit_program(received_nb_signals ? 255 : main_return_code);
4361 return main_return_code;