2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswresample/swresample.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/channel_layout.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
63 #include "libavutil/threadmessage.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
71 #if HAVE_SYS_RESOURCE_H
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
78 #if HAVE_GETPROCESSMEMORYINFO
82 #if HAVE_SETCONSOLECTRLHANDLER
88 #include <sys/select.h>
93 #include <sys/ioctl.h>
107 #include "cmdutils.h"
109 #include "libavutil/avassert.h"
/* Program identity used by cmdutils for banner/version output. */
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
/* Per-frame video statistics log (-vstats); opened lazily in do_video_stats(). */
114 static FILE *vstats_file;
/* Names of the constants available in -force_key_frames expressions
   (array contents elided from this view). */
116 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
125 static void do_video_stats(OutputStream *ost, int frame_size);
126 static int64_t getutime(void);
127 static int64_t getmaxrss(void);
129 static int run_as_daemon = 0;
/* Counters reported in progress/status output. */
130 static int nb_frames_dup = 0;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
/* Wall-clock reference used by update_benchmark() (see getutime()). */
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (allocated in do_subtitle_out). */
137 static uint8_t *subtitle_out;
/* Global tables of input/output streams and files, shared with ffmpeg_opt.c /
   ffmpeg_filter.c; freed in ffmpeg_cleanup(). */
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
149 FilterGraph **filtergraphs;
154 /* init terminal so that we can grab keys */
/* Saved terminal state, restored by term_exit_sigsafe() when restore_tty is set. */
155 static struct termios oldtty;
156 static int restore_tty;
160 static void free_input_threads(void);
164 Convert subtitles to video with alpha to insert them in filter graphs.
165 This is a temporary solution until libavfilter gets real subtitles support.
/**
 * (Re)allocate ist->sub2video.frame as a blank RGB32 canvas sized from the
 * decoder dimensions (falling back to the configured sub2video w/h) and
 * zero its pixel data.  Returns <0 on allocation failure (error path elided
 * from this view).
 */
168 static int sub2video_get_blank_frame(InputStream *ist)
171 AVFrame *frame = ist->sub2video.frame;
173 av_frame_unref(frame);
/* Prefer the decoder's dimensions; fall back to the pre-configured canvas size. */
174 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
176 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the frame buffer. */
177 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Fully transparent/black canvas: RGB32 is 4 bytes/pixel, so one plane suffices. */
179 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/**
 * Blit one paletted bitmap subtitle rectangle into a w x h RGB32 canvas.
 * Rectangles that are not bitmaps or that fall outside the canvas are
 * rejected with a warning (early-return lines elided from this view).
 */
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186 uint32_t *pal, *dst2;
190 if (r->type != SUBTITLE_BITMAP) {
191 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
/* Bounds check: the rectangle must lie entirely within the canvas. */
194 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left corner; 4 bytes per RGB32 pixel. */
201 dst += r->y * dst_linesize + r->x * 4;
202 src = r->pict.data[0];
203 pal = (uint32_t *)r->pict.data[1];
/* Expand each palette index to a 32-bit pixel, row by row. */
204 for (y = 0; y < r->h; y++) {
205 dst2 = (uint32_t *)dst;
207 for (x = 0; x < r->w; x++)
208 *(dst2++) = pal[*(src2++)];
210 src += r->pict.linesize[0];
/**
 * Stamp the current sub2video canvas with pts and push a reference to it
 * into every filtergraph input fed by this stream.  KEEP_REF keeps our
 * copy valid so the same frame can be re-sent by the heartbeat.
 */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame() first. */
219 av_assert1(frame->data[0]);
220 ist->sub2video.last_pts = frame->pts = pts;
221 for (i = 0; i < ist->nb_filters; i++)
222 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223 AV_BUFFERSRC_FLAG_KEEP_REF |
224 AV_BUFFERSRC_FLAG_PUSH);
/**
 * Render a decoded subtitle (or a blank canvas when sub is NULL — the
 * heartbeat/flush case) onto the sub2video frame and push it downstream.
 * Display times are in ms relative to sub->pts (AV_TIME_BASE units), hence
 * the * 1000 before rescaling to the stream time base.
 */
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 AVFrame *frame = ist->sub2video.frame;
233 int64_t pts, end_pts;
238 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241 AV_TIME_BASE_Q, ist->st->time_base);
242 num_rects = sub->num_rects;
/* sub == NULL branch (condition elided): reuse the previous end time as pts. */
244 pts = ist->sub2video.end_pts;
248 if (sub2video_get_blank_frame(ist) < 0) {
249 av_log(ist->dec_ctx, AV_LOG_ERROR,
250 "Impossible to get a blank canvas.\n");
253 dst = frame->data [0];
254 dst_linesize = frame->linesize[0];
/* Composite every rectangle onto the blank canvas, then publish it. */
255 for (i = 0; i < num_rects; i++)
256 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257 sub2video_push_ref(ist, pts);
258 ist->sub2video.end_pts = end_pts;
/**
 * Keep sub2video filtergraph inputs fed while demuxing other streams of the
 * same file, so filters (e.g. overlay) are never starved waiting for a
 * subtitle frame.  pts is in ist->st->time_base units.
 */
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 InputFile *infile = input_files[ist->file_index];
267 /* When a frame is read from a file, examine all sub2video streams in
268 the same file and send the sub2video frame again. Otherwise, decoded
269 video frames could be accumulating in the filter graph while a filter
270 (possibly overlay) is desperately waiting for a subtitle frame. */
271 for (i = 0; i < infile->nb_streams; i++) {
272 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources. */
273 if (!ist2->sub2video.frame)
275 /* subtitles seem to be usually muxed ahead of other streams;
276 if not, subtracting a larger time here is necessary */
277 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278 /* do not send the heartbeat frame if the subtitle is already ahead */
279 if (pts2 <= ist2->sub2video.last_pts)
/* Past the current subtitle's end (or no canvas yet): render a fresh one. */
281 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282 sub2video_update(ist2, NULL);
/* Only re-push when some filter actually failed a frame request
   (push is guarded by nb_reqs; condition line elided from this view). */
283 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286 sub2video_push_ref(ist2, pts2);
/**
 * End-of-stream handling for a sub2video input: emit one final blank
 * update (unless already at INT64_MAX) and send EOF (NULL frame) to every
 * connected buffer source.
 */
290 static void sub2video_flush(InputStream *ist)
294 if (ist->sub2video.end_pts < INT64_MAX)
295 sub2video_update(ist, NULL);
296 for (i = 0; i < ist->nb_filters; i++)
297 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
300 /* end of sub2video hack */
/**
 * Restore the saved terminal attributes.  Kept async-signal-safe so it can
 * run from sigterm_handler (tcsetattr is a plain syscall wrapper; the
 * guard on restore_tty is elided from this view).
 */
302 static void term_exit_sigsafe(void)
306 tcsetattr (0, TCSANOW, &oldtty);
/* NOTE(review): log call with QUIET level and empty payload — presumably a
   flush side effect; confirm against the full source before relying on it. */
312 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/termination state shared between the handler and the main loop.
   volatile because they are written from a signal handler context. */
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
318 static volatile int transcode_init_done = 0;
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
/* Async-signal-safe handler: record the signal, count repeats, and after
   more than three signals hard-exit (write() is signal-safe, printf is not). */
323 sigterm_handler(int sig)
325 received_sigterm = sig;
326 received_nb_signals++;
328 if(received_nb_signals > 3) {
329 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330 strlen("Received > 3 system signals, hard exiting\n"));
336 #if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: translate console events into the same
 * POSIX-style signals the rest of the code handles via sigterm_handler.
 */
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
344 case CTRL_BREAK_EVENT:
345 sigterm_handler(SIGINT);
/* Window close / logoff / shutdown behave like SIGTERM. */
348 case CTRL_CLOSE_EVENT:
349 case CTRL_LOGOFF_EVENT:
350 case CTRL_SHUTDOWN_EVENT:
351 sigterm_handler(SIGTERM);
352 /* Basically, with these 3 events, when we return from this method the
353 process is hard terminated, so stall as long as we need to
354 to try and let the main thread(s) clean up and gracefully terminate
355 (we have at most 5 seconds, but should be done far before that). */
356 while (!ffmpeg_exited) {
/* default case: unknown event, log and fall through (case labels elided). */
362 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): the enclosing function's signature is elided from this view;
   presumably this is term_init().  It puts the controlling terminal into a
   raw-ish mode for interactive key handling and installs signal handlers. */
375 istty = isatty(0) && isatty(2);
377 if (istty && tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control so single keypresses are delivered. */
381 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
382 |INLCR|IGNCR|ICRNL|IXON);
383 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
384 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
385 tty.c_cflag &= ~(CSIZE|PARENB);
390 tcsetattr (0, TCSANOW, &tty);
/* Route termination-type signals through sigterm_handler. */
392 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
396 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
397 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 signal(SIGXCPU, sigterm_handler);
401 #if HAVE_SETCONSOLECTRLHANDLER
402 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
406 /* read a key without blocking */
/**
 * Poll stdin for a single keypress.  POSIX path uses select() with a zero
 * timeout; Windows path distinguishes console vs. pipe input via
 * GetConsoleMode/PeekNamedPipe.  (Return statements elided from this view.)
 */
407 static int read_key(void)
419 n = select(1, &rfds, NULL, NULL, &tv);
428 # if HAVE_PEEKNAMEDPIPE
430 static HANDLE input_handle;
433 input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails for non-console handles => stdin is a pipe. */
434 is_pipe = !GetConsoleMode(input_handle, &dw);
438 /* When running under a GUI, you will end here. */
439 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
440 // input pipe may have been closed by the program that ran ffmpeg
/**
 * AVIO interrupt callback: abort blocking I/O once a signal has been
 * received.  Before transcode init is done a single signal aborts
 * (threshold 0); afterwards it takes more than one.
 */
458 static int decode_interrupt_cb(void *ctx)
460 return received_nb_signals > transcode_init_done;
/* Shared interrupt callback handed to libavformat contexts. */
463 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/**
 * Global teardown: free every filtergraph, output file/stream, input
 * file/stream and the associated codec/filter state, then log how the run
 * ended.  Called once on exit; ret is the pending process exit status.
 */
465 static void ffmpeg_cleanup(int ret)
/* Benchmark summary (guard on do_benchmark elided from this view). */
470 int maxrss = getmaxrss() / 1024;
471 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free filtergraphs: graph, then each input/output pad's name and struct. */
474 for (i = 0; i < nb_filtergraphs; i++) {
475 FilterGraph *fg = filtergraphs[i];
476 avfilter_graph_free(&fg->graph);
477 for (j = 0; j < fg->nb_inputs; j++) {
478 av_freep(&fg->inputs[j]->name);
479 av_freep(&fg->inputs[j]);
481 av_freep(&fg->inputs);
482 for (j = 0; j < fg->nb_outputs; j++) {
483 av_freep(&fg->outputs[j]->name);
484 av_freep(&fg->outputs[j]);
486 av_freep(&fg->outputs);
487 av_freep(&fg->graph_desc);
489 av_freep(&filtergraphs[i]);
491 av_freep(&filtergraphs);
493 av_freep(&subtitle_out);
/* Close output files: close pb for file-backed muxers, free the context. */
496 for (i = 0; i < nb_output_files; i++) {
497 OutputFile *of = output_files[i];
502 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504 avformat_free_context(s);
505 av_dict_free(&of->opts);
507 av_freep(&output_files[i]);
/* Free per-output-stream state, including the bitstream filter chain. */
509 for (i = 0; i < nb_output_streams; i++) {
510 OutputStream *ost = output_streams[i];
511 AVBitStreamFilterContext *bsfc;
516 bsfc = ost->bitstream_filters;
518 AVBitStreamFilterContext *next = bsfc->next;
519 av_bitstream_filter_close(bsfc);
522 ost->bitstream_filters = NULL;
523 av_frame_free(&ost->filtered_frame);
524 av_frame_free(&ost->last_frame);
526 av_parser_close(ost->parser);
528 av_freep(&ost->forced_keyframes);
529 av_expr_free(ost->forced_keyframes_pexpr);
530 av_freep(&ost->avfilter);
531 av_freep(&ost->logfile_prefix);
533 av_freep(&ost->audio_channels_map);
534 ost->audio_channels_mapped = 0;
536 avcodec_free_context(&ost->enc_ctx);
538 av_freep(&output_streams[i]);
/* Stop reader threads before tearing down the inputs they feed. */
541 free_input_threads();
543 for (i = 0; i < nb_input_files; i++) {
544 avformat_close_input(&input_files[i]->ctx);
545 av_freep(&input_files[i]);
547 for (i = 0; i < nb_input_streams; i++) {
548 InputStream *ist = input_streams[i];
550 av_frame_free(&ist->decoded_frame);
551 av_frame_free(&ist->filter_frame);
552 av_dict_free(&ist->decoder_opts);
553 avsubtitle_free(&ist->prev_sub.subtitle);
554 av_frame_free(&ist->sub2video.frame);
555 av_freep(&ist->filters);
556 av_freep(&ist->hwaccel_device);
558 avcodec_free_context(&ist->dec_ctx);
560 av_freep(&input_streams[i]);
565 av_freep(&vstats_filename);
/* Finally release the top-level stream/file tables themselves. */
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
/* Report how the run ended: signal-driven exit vs. conversion failure. */
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
/**
 * Remove from dictionary *a every key that also appears in b (exact,
 * case-sensitive match).  Used to drop options already consumed elsewhere.
 */
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
/* Empty key + IGNORE_SUFFIX iterates over every entry of b. */
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/**
 * Fail fatally if any option in m was not consumed: a leftover entry means
 * the user passed an option no component recognized.
 */
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort with a diagnostic when an experimental codec is used without
   -strict experimental; encoder selects the de/encoder wording.
   (Body elided from this view.) */
604 static void abort_codec_experimental(AVCodec *c, int encoder)
/**
 * When -benchmark_all is active, log the CPU time elapsed since the last
 * call together with a printf-style label, then reset the reference
 * (reset line elided from this view).  Pass fmt == NULL to only reset.
 */
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/**
 * Mark every output stream finished: `this_stream` flags go to ost itself,
 * `others` to all remaining streams.  Used when the muxer dies so no
 * further packets are produced anywhere.
 */
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/**
 * Final per-packet path to the muxer: copy extradata to the muxing context,
 * apply sync-method pts/dts overrides, enforce -frames limits, collect
 * video quality stats, run the bitstream filter chain, sanitize/monotonize
 * timestamps and hand the packet to av_interleaved_write_frame().
 * Many guard lines are elided from this view.
 */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Lazily propagate encoder extradata to the muxing codec context. */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* -vsync drop / negative -async: discard timestamps entirely. */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
/* Non-video (or stream-copied video): enforce -frames here. */
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
/* Video: pull quality/PSNR stats from packet side data for reporting. */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
683 av_packet_split_side_data(pkt);
/* Run the (legacy) bitstream filter chain; loop over bsfc elided. */
686 AVPacket new_pkt = *pkt;
687 AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
690 int a = av_bitstream_filter_filter(bsfc, avctx,
691 bsf_arg ? bsf_arg->value : NULL,
692 &new_pkt.data, &new_pkt.size,
693 pkt->data, pkt->size,
694 pkt->flags & AV_PKT_FLAG_KEY);
695 FF_DISABLE_DEPRECATION_WARNINGS
/* a == 0 with unchanged ownership: copy to a padded buffer we own. */
696 if(a == 0 && new_pkt.data != pkt->data
697 #if FF_API_DESTRUCT_PACKET
701 FF_ENABLE_DEPRECATION_WARNINGS
702 uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
704 memcpy(t, new_pkt.data, new_pkt.size);
705 memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
713 pkt->side_data = NULL;
714 pkt->side_data_elems = 0;
/* Wrap the filtered data in a refcounted buffer. */
716 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
717 av_buffer_default_free, NULL, 0);
/* a < 0 error path (guard elided): report which filter failed. */
722 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
723 bsfc->filter->name, pkt->stream_index,
724 avctx->codec ? avctx->codec->name : "copy");
/* Timestamp sanitization for formats that require timestamps. */
734 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
735 if (pkt->dts != AV_NOPTS_VALUE &&
736 pkt->pts != AV_NOPTS_VALUE &&
737 pkt->dts > pkt->pts) {
738 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
740 ost->file_index, ost->st->index);
/* Replace dts by the median of {pts, dts, last_mux_dts+1}
   (sum minus min minus max). */
742 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
743 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
744 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing dts per stream. */
747 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
748 pkt->dts != AV_NOPTS_VALUE &&
749 ost->last_mux_dts != AV_NOPTS_VALUE) {
750 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
751 if (pkt->dts < max) {
752 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
753 av_log(s, loglevel, "Non-monotonous DTS in output stream "
754 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
755 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* exit_on_error path (guard elided): abort instead of patching. */
757 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
760 av_log(s, loglevel, "changing to %"PRId64". This may result "
761 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to max. */
763 if(pkt->pts >= pkt->dts)
764 pkt->pts = FFMAX(pkt->pts, max);
769 ost->last_mux_dts = pkt->dts;
/* Accounting for progress/statistics reporting. */
771 ost->data_size += pkt->size;
772 ost->packets_written++;
774 pkt->stream_index = ost->index;
/* debug_ts trace of what goes into the muxer (guard elided). */
777 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
778 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
779 av_get_media_type_string(ost->enc_ctx->codec_type),
780 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
781 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
786 ret = av_interleaved_write_frame(s, pkt);
/* Mux failure: record error and shut down every output stream. */
788 print_error("av_interleaved_write_frame()", ret);
789 main_return_code = 1;
790 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
/**
 * Mark one output stream's encoder as finished and, when applicable,
 * shorten the owning file's recording time to the stream's end timestamp
 * (condition guarding the shortening elided from this view).
 */
795 static void close_output_stream(OutputStream *ost)
797 OutputFile *of = output_files[ost->file_index];
799 ost->finished |= ENCODER_FINISHED;
/* Stream end in AV_TIME_BASE units, measured from the first pts. */
801 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
802 of->recording_time = FFMIN(of->recording_time, end);
/**
 * Return 0 (and close the stream) once the output has reached the -t
 * recording time; non-zero while encoding should continue.  Comparison is
 * done in the encoder time base against AV_TIME_BASE_Q.
 */
806 static int check_recording_time(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 if (of->recording_time != INT64_MAX &&
811 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
812 AV_TIME_BASE_Q) >= 0) {
813 close_output_stream(ost);
/**
 * Encode one audio frame and mux the resulting packet(s).  Maintains
 * ost->sync_opts as the running sample count so frames without pts get a
 * continuous timeline.  (Several guards/returns elided from this view.)
 */
819 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
822 AVCodecContext *enc = ost->enc_ctx;
826 av_init_packet(&pkt);
/* Stop once the -t recording limit has been reached. */
830 if (!check_recording_time(ost))
/* Without a pts (or with negative -async), continue the synthetic timeline. */
833 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
834 frame->pts = ost->sync_opts;
835 ost->sync_opts = frame->pts + frame->nb_samples;
836 ost->samples_encoded += frame->nb_samples;
837 ost->frames_encoded++;
839 av_assert0(pkt.size || !pkt.data);
840 update_benchmark(NULL);
/* debug_ts trace of encoder input (guard elided). */
842 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
843 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
844 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
845 enc->time_base.num, enc->time_base.den);
848 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
849 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
852 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Convert packet timestamps from encoder to stream time base before muxing. */
855 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* debug_ts trace of encoder output (guard elided). */
858 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
859 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
860 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
861 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
864 write_frame(s, &pkt, ost);
/**
 * Encode one AVSubtitle and mux it.  DVB subtitles are encoded twice (one
 * packet draws, the second clears), hence the nb-iteration loop.  Display
 * times are normalized so start_display_time is always 0.
 */
868 static void do_subtitle_out(AVFormatContext *s,
873 int subtitle_out_max_size = 1024 * 1024;
874 int subtitle_out_size, nb, i;
/* Subtitles cannot be timed without a pts; bail out (exit path elided). */
879 if (sub->pts == AV_NOPTS_VALUE) {
880 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB encode buffer. */
889 subtitle_out = av_malloc(subtitle_out_max_size);
891 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
896 /* Note: DVB subtitle need one packet to draw them and one other
897 packet to clear them */
898 /* XXX: signal it in the codec context ? */
899 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
904 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
906 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
907 pts -= output_files[ost->file_index]->start_time;
908 for (i = 0; i < nb; i++) {
/* The encoder may clobber num_rects (e.g. for the DVB "clear" packet);
   remember it so the caller's AVSubtitle stays intact. */
909 unsigned save_num_rects = sub->num_rects;
911 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
912 if (!check_recording_time(ost))
916 // start_display_time is required to be 0
917 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
918 sub->end_display_time -= sub->start_display_time;
919 sub->start_display_time = 0;
923 ost->frames_encoded++;
925 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
926 subtitle_out_max_size, sub);
928 sub->num_rects = save_num_rects;
929 if (subtitle_out_size < 0) {
930 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
/* Build the output packet; duration comes from end_display_time (ms). */
934 av_init_packet(&pkt);
935 pkt.data = subtitle_out;
936 pkt.size = subtitle_out_size;
937 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
938 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
939 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
940 /* XXX: the pts correction is handled here. Maybe handling
941 it in the codec would be better */
/* First packet draws at start time, second clears at end time (90 kHz). */
943 pkt.pts += 90 * sub->start_display_time;
945 pkt.pts += 90 * sub->end_display_time;
948 write_frame(s, &pkt, ost);
/**
 * Video output workhorse: decide how many frames to emit for the incoming
 * filtered picture according to the active vsync mode (duplicating or
 * dropping to hit CFR, passing through for VFR/passthrough), handle forced
 * keyframes, encode (or raw-copy) each frame, and keep duplicate/drop
 * statistics.  Large parts of the control flow are elided from this view.
 */
952 static void do_video_out(AVFormatContext *s,
954 AVFrame *next_picture,
957 int ret, format_video_sync;
959 AVCodecContext *enc = ost->enc_ctx;
960 AVCodecContext *mux_enc = ost->st->codec;
961 int nb_frames, nb0_frames, i;
962 double delta, delta0;
965 InputStream *ist = NULL;
966 AVFilterContext *filter = ost->filter->filter;
968 if (ost->source_index >= 0)
969 ist = input_streams[ost->source_index];
/* Frame duration (in encoder time-base ticks) from the filter frame rate,
   clamped by the output frame rate when input timing is known. */
971 if (filter->inputs[0]->frame_rate.num > 0 &&
972 filter->inputs[0]->frame_rate.den > 0)
973 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
975 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
976 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Without a filter script, trust the packet duration when plausible. */
978 if (!ost->filters_script &&
982 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
983 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush case (next_picture == NULL, guard elided): predict the number of
   repeats from the median of the last three nb0 values. */
988 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
989 ost->last_nb0_frames[1],
990 ost->last_nb0_frames[2]);
/* delta0: gap between the frame's ideal pts and current sync position. */
992 delta0 = sync_ipts - ost->sync_opts;
993 delta = delta0 + duration;
995 /* by default, we output a single frame */
/* Resolve VSYNC_AUTO to a concrete mode based on the muxer. */
999 format_video_sync = video_sync_method;
1000 if (format_video_sync == VSYNC_AUTO) {
1001 if(!strcmp(s->oformat->name, "avi")) {
1002 format_video_sync = VSYNC_VFR;
1004 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1006 && format_video_sync == VSYNC_CFR
1007 && input_files[ist->file_index]->ctx->nb_streams == 1
1008 && input_files[ist->file_index]->input_ts_offset == 0) {
1009 format_video_sync = VSYNC_VSCFR;
1011 if (format_video_sync == VSYNC_CFR && copy_ts) {
1012 format_video_sync = VSYNC_VSCFR;
/* Clip frames arriving slightly in the past (guard lines elided). */
1018 format_video_sync != VSYNC_PASSTHROUGH &&
1019 format_video_sync != VSYNC_DROP) {
1020 double cor = FFMIN(-delta0, duration);
1021 if (delta0 < -0.6) {
1022 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1024 av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
/* Per-mode dup/drop decision (case labels partially elided). */
1030 switch (format_video_sync) {
1032 if (ost->frame_number == 0 && delta - duration >= 0.5) {
1033 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1036 ost->sync_opts = lrint(sync_ipts);
1039 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1040 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1042 } else if (delta < -1.1)
1044 else if (delta > 1.1) {
1045 nb_frames = lrintf(delta);
1047 nb0_frames = lrintf(delta0 - 0.6);
1053 else if (delta > 0.6)
1054 ost->sync_opts = lrint(sync_ipts);
1057 case VSYNC_PASSTHROUGH:
1058 ost->sync_opts = lrint(sync_ipts);
/* Respect -frames and keep a rolling history of nb0 for the flush path. */
1065 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1066 nb0_frames = FFMIN(nb0_frames, nb_frames);
1068 memmove(ost->last_nb0_frames + 1,
1069 ost->last_nb0_frames,
1070 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1071 ost->last_nb0_frames[0] = nb0_frames;
/* Report a dropped frame (counter increment elided). */
1073 if (nb0_frames == 0 && ost->last_droped) {
1075 av_log(NULL, AV_LOG_VERBOSE,
1076 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1077 ost->frame_number, ost->st->index, ost->last_frame->pts);
1079 if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1080 if (nb_frames > dts_error_threshold * 30) {
1081 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1085 nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1086 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1088 ost->last_droped = nb_frames == nb0_frames && next_picture;
1090 /* duplicates frame if needed */
1091 for (i = 0; i < nb_frames; i++) {
1092 AVFrame *in_picture;
1093 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1097 if (i < nb0_frames && ost->last_frame) {
1098 in_picture = ost->last_frame;
1100 in_picture = next_picture;
1105 in_picture->pts = ost->sync_opts;
1108 if (!check_recording_time(ost))
1110 if (ost->frame_number >= ost->max_frames)
/* Raw-video shortcut: hand the AVPicture to the muxer without encoding. */
1114 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1115 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1116 /* raw pictures are written as AVPicture structure to
1117 avoid any copies. We support temporarily the older
1119 if (in_picture->interlaced_frame)
1120 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1122 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1123 pkt.data = (uint8_t *)in_picture;
1124 pkt.size = sizeof(AVPicture);
1125 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1126 pkt.flags |= AV_PKT_FLAG_KEY;
1128 write_frame(s, &pkt, ost);
1130 int got_packet, forced_keyframe = 0;
/* Honor an explicit -top option when interlaced coding is active. */
1133 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1134 ost->top_field_first >= 0)
1135 in_picture->top_field_first = !!ost->top_field_first;
1137 if (in_picture->interlaced_frame) {
1138 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1139 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1141 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1145 in_picture->quality = enc->global_quality;
1146 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source" flagging. */
1148 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1149 in_picture->pts * av_q2d(enc->time_base) : NAN;
1150 if (ost->forced_kf_index < ost->forced_kf_count &&
1151 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1152 ost->forced_kf_index++;
1153 forced_keyframe = 1;
1154 } else if (ost->forced_keyframes_pexpr) {
1156 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1157 res = av_expr_eval(ost->forced_keyframes_pexpr,
1158 ost->forced_keyframes_expr_const_values, NULL);
1159 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1160 ost->forced_keyframes_expr_const_values[FKF_N],
1161 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1162 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1163 ost->forced_keyframes_expr_const_values[FKF_T],
1164 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1167 forced_keyframe = 1;
1168 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1169 ost->forced_keyframes_expr_const_values[FKF_N];
1170 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1171 ost->forced_keyframes_expr_const_values[FKF_T];
1172 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1175 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1176 } else if ( ost->forced_keyframes
1177 && !strncmp(ost->forced_keyframes, "source", 6)
1178 && in_picture->key_frame==1) {
1179 forced_keyframe = 1;
1182 if (forced_keyframe) {
1183 in_picture->pict_type = AV_PICTURE_TYPE_I;
1184 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1187 update_benchmark(NULL);
/* debug_ts trace of encoder input (guard elided). */
1189 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1190 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1191 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1192 enc->time_base.num, enc->time_base.den);
1195 ost->frames_encoded++;
1197 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1198 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1200 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* debug_ts trace of encoder output, before rescale (guard elided). */
1206 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1207 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1208 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1209 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Encoders without delay may omit pts; fall back to the sync counter. */
1212 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1213 pkt.pts = ost->sync_opts;
1215 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
/* debug_ts trace of encoder output, after rescale (guard elided). */
1218 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1219 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1220 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1221 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1224 frame_size = pkt.size;
1225 write_frame(s, &pkt, ost);
1227 /* if two pass, output log */
1228 if (ost->logfile && enc->stats_out) {
1229 fprintf(ost->logfile, "%s", enc->stats_out);
1235 * For video, number of frames in == number of packets out.
1236 * But there may be reordering, so we can't throw away frames on encoder
1237 * flush, we need to limit them here, before they go into encoder.
1239 ost->frame_number++;
1241 if (vstats_filename && frame_size)
1242 do_video_stats(ost, frame_size);
/* Remember this picture so it can be re-encoded as a duplicate next call. */
1245 if (!ost->last_frame)
1246 ost->last_frame = av_frame_alloc();
1247 av_frame_unref(ost->last_frame);
1248 if (next_picture && ost->last_frame)
1249 av_frame_ref(ost->last_frame, next_picture);
1251 av_frame_free(&ost->last_frame);
1254 static double psnr(double d)
1256 return -10.0 * log(d) / log(10.0);
/**
 * Append one line of per-frame video statistics (-vstats) to vstats_file:
 * frame number, quantizer, optional PSNR, frame size, timestamps and
 * bitrates.  The file is opened on first call (error path elided).
 */
1259 static void do_video_stats(OutputStream *ost, int frame_size)
1261 AVCodecContext *enc;
1263 double ti1, bitrate, avg_bitrate;
1265 /* this is executed just the first time do_video_stats is called */
1267 vstats_file = fopen(vstats_filename, "w");
1275 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1276 frame_number = ost->st->nb_frames;
/* Quality was stashed by write_frame() from packet side data (lambda units). */
1277 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1278 ost->quality / (float)FF_QP2LAMBDA);
1280 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1281 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1283 fprintf(vstats_file,"f_size= %6d ", frame_size);
1284 /* compute pts value */
1285 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous and cumulative-average bitrates in kbit/s. */
1289 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1290 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1291 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1292 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1293 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/**
 * Mark this stream fully finished (encoder and muxer); if the file is in
 * shortest mode (guard elided from this view), finish every sibling stream
 * of the same output file as well.
 */
1297 static void finish_output_stream(OutputStream *ost)
1299 OutputFile *of = output_files[ost->file_index];
1302 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1305 for (i = 0; i < of->ctx->nb_streams; i++)
1306 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
 * Get and encode new output from any of the filtergraphs, without causing
 * @return 0 for success, <0 for severe errors
static int reap_filters(int flush)
    AVFrame *filtered_frame = NULL;
    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        filter = ost->filter->filter;
        /* lazily allocate the per-stream reusable frame */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        filtered_frame = ost->filtered_frame;
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            /* NO_REQUEST: only drain frames already queued in the sink,
             * do not pull more data through the graph */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* on EOF during flush, push a NULL frame so the video
                     * path can emit any pending duplicated/last frames */
                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
            if (ost->finished) {
                av_frame_unref(filtered_frame);
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational tb = enc->time_base;
                /* widen the timebase denominator so the float pts keeps
                 * extra fractional precision (capped at 16 extra bits) */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
                tb.den <<= extra_bits;
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
            switch (filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
                /* if the user did not force an aspect ratio, take it from the filter output */
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            enc->time_base.num, enc->time_base.den);
                do_video_out(of->ctx, ost, filtered_frame, float_pts);
            case AVMEDIA_TYPE_AUDIO:
                /* encoders without PARAM_CHANGE cannot cope with a channel
                 * count differing from what they were opened with */
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                do_audio_out(of->ctx, ost, filtered_frame);
            // TODO support subtitle filters
        av_frame_unref(filtered_frame);
1411 static void print_final_stats(int64_t total_size)
1413 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1414 uint64_t subtitle_size = 0;
1415 uint64_t data_size = 0;
1416 float percent = -1.0;
1420 for (i = 0; i < nb_output_streams; i++) {
1421 OutputStream *ost = output_streams[i];
1422 switch (ost->enc_ctx->codec_type) {
1423 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1424 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1425 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1426 default: other_size += ost->data_size; break;
1428 extra_size += ost->enc_ctx->extradata_size;
1429 data_size += ost->data_size;
1430 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1431 != AV_CODEC_FLAG_PASS1)
1435 if (data_size && total_size>0 && total_size >= data_size)
1436 percent = 100.0 * (total_size - data_size) / data_size;
1438 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1439 video_size / 1024.0,
1440 audio_size / 1024.0,
1441 subtitle_size / 1024.0,
1442 other_size / 1024.0,
1443 extra_size / 1024.0);
1445 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1447 av_log(NULL, AV_LOG_INFO, "unknown");
1448 av_log(NULL, AV_LOG_INFO, "\n");
1450 /* print verbose per-stream stats */
1451 for (i = 0; i < nb_input_files; i++) {
1452 InputFile *f = input_files[i];
1453 uint64_t total_packets = 0, total_size = 0;
1455 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1456 i, f->ctx->filename);
1458 for (j = 0; j < f->nb_streams; j++) {
1459 InputStream *ist = input_streams[f->ist_index + j];
1460 enum AVMediaType type = ist->dec_ctx->codec_type;
1462 total_size += ist->data_size;
1463 total_packets += ist->nb_packets;
1465 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1466 i, j, media_type_string(type));
1467 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1468 ist->nb_packets, ist->data_size);
1470 if (ist->decoding_needed) {
1471 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1472 ist->frames_decoded);
1473 if (type == AVMEDIA_TYPE_AUDIO)
1474 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1475 av_log(NULL, AV_LOG_VERBOSE, "; ");
1478 av_log(NULL, AV_LOG_VERBOSE, "\n");
1481 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1482 total_packets, total_size);
1485 for (i = 0; i < nb_output_files; i++) {
1486 OutputFile *of = output_files[i];
1487 uint64_t total_packets = 0, total_size = 0;
1489 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1490 i, of->ctx->filename);
1492 for (j = 0; j < of->ctx->nb_streams; j++) {
1493 OutputStream *ost = output_streams[of->ost_index + j];
1494 enum AVMediaType type = ost->enc_ctx->codec_type;
1496 total_size += ost->data_size;
1497 total_packets += ost->packets_written;
1499 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1500 i, j, media_type_string(type));
1501 if (ost->encoding_needed) {
1502 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1503 ost->frames_encoded);
1504 if (type == AVMEDIA_TYPE_AUDIO)
1505 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1506 av_log(NULL, AV_LOG_VERBOSE, "; ");
1509 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1510 ost->packets_written, ost->data_size);
1512 av_log(NULL, AV_LOG_VERBOSE, "\n");
1515 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1516 total_packets, total_size);
1518 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1519 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1521 av_log(NULL, AV_LOG_WARNING, "\n");
1523 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1528 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1531 AVBPrint buf_script;
1533 AVFormatContext *oc;
1535 AVCodecContext *enc;
1536 int frame_number, vid, i;
1538 int64_t pts = INT64_MIN;
1539 static int64_t last_time = -1;
1540 static int qp_histogram[52];
1541 int hours, mins, secs, us;
1543 if (!print_stats && !is_last_report && !progress_avio)
1546 if (!is_last_report) {
1547 if (last_time == -1) {
1548 last_time = cur_time;
1551 if ((cur_time - last_time) < 500000)
1553 last_time = cur_time;
1557 oc = output_files[0]->ctx;
1559 total_size = avio_size(oc->pb);
1560 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1561 total_size = avio_tell(oc->pb);
1565 av_bprint_init(&buf_script, 0, 1);
1566 for (i = 0; i < nb_output_streams; i++) {
1568 ost = output_streams[i];
1570 if (!ost->stream_copy)
1571 q = ost->quality / (float) FF_QP2LAMBDA;
1573 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1574 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1575 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1576 ost->file_index, ost->index, q);
1578 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1579 float fps, t = (cur_time-timer_start) / 1000000.0;
1581 frame_number = ost->frame_number;
1582 fps = t > 1 ? frame_number / t : 0;
1583 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1584 frame_number, fps < 9.95, fps, q);
1585 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1586 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1587 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1588 ost->file_index, ost->index, q);
1590 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1594 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1596 for (j = 0; j < 32; j++)
1597 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1600 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1602 double error, error_sum = 0;
1603 double scale, scale_sum = 0;
1605 char type[3] = { 'Y','U','V' };
1606 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1607 for (j = 0; j < 3; j++) {
1608 if (is_last_report) {
1609 error = enc->error[j];
1610 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1612 error = ost->error[j];
1613 scale = enc->width * enc->height * 255.0 * 255.0;
1619 p = psnr(error / scale);
1620 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1621 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1622 ost->file_index, ost->index, type[j] | 32, p);
1624 p = psnr(error_sum / scale_sum);
1625 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1626 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1627 ost->file_index, ost->index, p);
1631 /* compute min output value */
1632 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1633 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1634 ost->st->time_base, AV_TIME_BASE_Q));
1636 nb_frames_drop += ost->last_droped;
1639 secs = FFABS(pts) / AV_TIME_BASE;
1640 us = FFABS(pts) % AV_TIME_BASE;
1646 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1648 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1651 "size=%8.0fkB time=", total_size / 1024.0);
1653 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1655 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1656 (100 * us) / AV_TIME_BASE);
1659 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1660 av_bprintf(&buf_script, "bitrate=N/A\n");
1662 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1663 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1666 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1667 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1668 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1669 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1670 hours, mins, secs, us);
1672 if (nb_frames_dup || nb_frames_drop)
1673 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1674 nb_frames_dup, nb_frames_drop);
1675 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1676 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1678 if (print_stats || is_last_report) {
1679 const char end = is_last_report ? '\n' : '\r';
1680 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1681 fprintf(stderr, "%s %c", buf, end);
1683 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1688 if (progress_avio) {
1689 av_bprintf(&buf_script, "progress=%s\n",
1690 is_last_report ? "end" : "continue");
1691 avio_write(progress_avio, buf_script.str,
1692 FFMIN(buf_script.len, buf_script.size - 1));
1693 avio_flush(progress_avio);
1694 av_bprint_finalize(&buf_script, NULL);
1695 if (is_last_report) {
1696 avio_closep(&progress_avio);
1701 print_final_stats(total_size);
/* Drain every encoder at end of input: keep calling the encode function with
 * a NULL frame until it stops producing packets, writing each packet out. */
static void flush_encoders(void)
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;
        if (!ost->encoding_needed)
        /* raw PCM-style audio (frame_size <= 1) has no delayed frames to flush */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
        /* rawvideo written via AVFMT_RAWPICTURE bypasses the encoder entirely */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                av_init_packet(&pkt);
                update_benchmark(NULL);
                /* NULL frame signals the encoder to emit delayed packets */
                ret = encode(enc, &pkt, NULL, &got_packet);
                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                /* append two-pass log data produced during flushing */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                if (ost->finished & MUXER_FINISHED) {
                    av_free_packet(&pkt);
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
 * Check whether a packet from ist should be written into ost at this time
static int check_output_constraints(InputStream *ist, OutputStream *ost)
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
    /* the output stream must be fed by exactly this input stream */
    if (ost->source_index != ist_index)
    /* drop packets that precede the requested output start time (-ss) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding: rescale
 * timestamps into the output timebase, honour -ss/-t trimming, and apply
 * the header-format parser conversion where the codec requires it. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
    av_init_packet(&opkt);
    /* do not start a copied stream on a non-keyframe unless explicitly allowed */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
    if (pkt->pts == AV_NOPTS_VALUE) {
        if (!ost->frame_number && ist->pts < start_time &&
            !ost->copy_prior_start)
        if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
            !ost->copy_prior_start)
    /* stop the stream once the output recording time (-t) is reached */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
    /* also honour a per-input-file recording limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
        opkt.pts = AV_NOPTS_VALUE;
    /* fall back on the tracked input DTS when the packet carries none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
            duration = ist->dec_ctx->frame_size;
        /* rescale audio timestamps sample-accurately to avoid drift */
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;
    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != AV_CODEC_ID_H264
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != AV_CODEC_ID_VC1
        int ret = av_parser_change(ost->parser, ost->st->codec,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
            /* parser allocated a new buffer; give opkt ownership of it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    av_copy_packet_side_data(&opkt, pkt);
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    write_frame(of->ctx, &opkt, ost);
/* If the decoder did not report a channel layout, pick the default layout
 * for the channel count (bounded by -guess_layout_max) and warn the user. */
int guess_input_channel_layout(InputStream *ist)
    AVCodecContext *dec = ist->dec_ctx;
    if (!dec->channel_layout) {
        char layout_name[256];
        /* refuse to guess for channel counts above the user-set limit */
        if (dec->channels > ist->guess_layout_max)
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Decode one audio packet: update stream timing, reconfigure filtergraphs on
 * format/layout changes, and push the decoded frame into each input filter.
 * Returns the decoder result, or a negative error from the filter push. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;
    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    update_benchmark(NULL);
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;
    if (ret < 0 && exit_on_error)
    if (!*got_output || ret < 0)
    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
    /* detect any change in the decoded audio parameters that would
     * require the filtergraphs to be rebuilt */
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format ||
                       ist->resample_channels       != avctx->channels ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];
        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
        decoded_frame->channel_layout = avctx->channel_layout;
        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);
        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;
        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb   = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
        pkt->pts           = AV_NOPTS_VALUE;
    /* rescale sample-accurately into a samples-based timebase */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    for (i = 0; i < ist->nb_filters; i++) {
        /* every filter but the last gets its own reference to the frame */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
    decoded_frame->pts = AV_NOPTS_VALUE;
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/* Decode one video packet: pick a best-effort timestamp, retrieve hwaccel
 * data if needed, rebuild filtergraphs on resolution/pix_fmt changes, and
 * push the decoded frame into each input filter. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    AVRational *frame_sample_aspect;
    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    /* feed the tracked DTS to the decoder in the stream timebase */
    pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    update_benchmark(NULL);
    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    // The following line may be required in some cases where there is no parser
    // or the parser does not set has_b_frames correctly
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "has_b_frames is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codec->has_b_frames);
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;
    if (ret < 0 && exit_on_error)
    if (*got_output && ret >= 0) {
        /* debug aid: decoder context and frame disagree on geometry/format */
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
    if (!*got_output || ret < 0)
    /* honour a user-forced field order (-top) */
    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;
    ist->frames_decoded++;
    /* download the frame from hw memory when a hwaccel produced it */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    if(best_effort_timestamp != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    /* container-level SAR overrides the codec-level one */
    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;
        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;
        /* every filter but the last gets its own reference to the frame */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (ret == AVERROR_EOF) {
            ret = 0; /* ignore */
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fixing overlapping durations
 * (-fix_sub_duration, which delays output by one subtitle), update the
 * sub2video state, and encode the subtitle into every matching output. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;
    if (ret < 0 && exit_on_error)
    if (ret < 0 || !*got_output) {
            sub2video_flush(ist);
    if (ist->fix_sub_duration) {
        if (ist->prev_sub.got_output) {
            /* clamp the previous subtitle's display time so it ends when
             * the current one starts (value in milliseconds) */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
        /* emit the previous (now duration-fixed) subtitle, keep the current
         * one buffered until the next call */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
    sub2video_update(ist, &subtitle);
    if (!subtitle.num_rects)
    ist->frames_decoded++;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
    avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this stream by pushing a
 * NULL frame into each buffer source. */
static int send_filter_eof(InputStream *ist)
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintain the input stream's dts/pts
 * bookkeeping, run the appropriate decoder (possibly repeatedly, for
 * multi-frame packets and EOF flushing), signal filter EOF after the last
 * decoded frame, and perform stream copy for non-decoded streams. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
    if (!ist->saw_first_ts) {
        /* start dts before zero to account for decoder reorder delay */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        ist->saw_first_ts = 1;
    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;
        av_init_packet(&avpkt);
    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;
        /* subtitle codecs legitimately consume packets partially; warn for others */
        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            /* derive the frame duration: packet duration first, then the
             * codec framerate adjusted by the parser's repeat_pict */
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                                ist->dec_ctx->framerate.den * ticks) /
                                ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            if(ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
                ist->next_dts = AV_NOPTS_VALUE;
            ist->next_pts += duration; //FIXME the duration is not correct in some cases
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                   ist->file_index, ist->st->index, av_err2str(ret));
        avpkt.pts= AV_NOPTS_VALUE;
        // touch data and size only if not EOF
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
        if (got_output && !pkt)
    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    if (!pkt && ist->decoding_needed && !got_output) {
        int ret = send_filter_eof(ist);
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                /* snap next_dts to the forced frame grid, then advance one frame */
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
        do_streamcopy(ist, ost, pkt);
/*
 * print_sdp(): builds an SDP description covering every "rtp" output muxer
 * and either prints it to stdout or writes it to the file named by the
 * global sdp_filename (set by the -sdp_file option).
 * NOTE(review): this excerpt is a numbered listing with interior lines
 * elided (declarations of sdp/i/j, allocation check, loop closers, etc.).
 */
2432 static void print_sdp(void)
2437     AVIOContext *sdp_pb;
2438     AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Collect only the RTP output contexts; j counts how many were found. */
2442     for (i = 0, j = 0; i < nb_output_files; i++) {
2443         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2444             avc[j] = output_files[i]->ctx;
2449     av_sdp_create(avc, j, sdp, sizeof(sdp));
/* No -sdp_file given: emit on stdout; otherwise write to the named file. */
2451     if (!sdp_filename) {
2452         printf("SDP:\n%s\n", sdp);
2455         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2456             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2458             avio_printf(sdp_pb, "SDP:\n%s", sdp);
2459             avio_closep(&sdp_pb);
/* sdp_filename is owned globally; freed here so the SDP is written once. */
2460             av_freep(&sdp_filename);
2467 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2470 for (i = 0; hwaccels[i].name; i++)
2471 if (hwaccels[i].pix_fmt == pix_fmt)
2472 return &hwaccels[i];
/*
 * get_format(): AVCodecContext.get_format callback. Walks the decoder's
 * proposed pixel-format list and tries to initialize a matching hardware
 * acceleration backend; on hwaccel init failure for an explicitly
 * requested hwaccel it aborts format negotiation with AV_PIX_FMT_NONE.
 * NOTE(review): interior lines are elided in this excerpt — notably the
 * head of the condition continuing at 2491/2492 (presumably "if (!hwaccel ||")
 * and the "ret < 0" check before 2497 — TODO confirm against full source.
 */
2476 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2478     InputStream *ist = s->opaque;
2479     const enum AVPixelFormat *p;
/* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE). */
2482     for (p = pix_fmts; *p != -1; p++) {
2483         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2484         const HWAccel *hwaccel;
/* Software formats are not handled here. */
2486         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2489         hwaccel = get_hwaccel(*p);
/* Skip backends that do not match the active or user-requested hwaccel. */
2491             (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2492             (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2495         ret = hwaccel->init(s);
/* An explicitly requested hwaccel that cannot init is a fatal condition. */
2497             if (ist->hwaccel_id == hwaccel->id) {
2498                 av_log(NULL, AV_LOG_FATAL,
2499                        "%s hwaccel requested for input stream #%d:%d, "
2500                        "but cannot be initialized.\n", hwaccel->name,
2501                        ist->file_index, ist->st->index);
2502                 return AV_PIX_FMT_NONE;
/* Record the chosen backend and pixel format on the input stream. */
2506         ist->active_hwaccel_id = hwaccel->id;
2507         ist->hwaccel_pix_fmt   = *p;
2514 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2516 InputStream *ist = s->opaque;
2518 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2519 return ist->hwaccel_get_buffer(s, frame, flags);
2521 return avcodec_default_get_buffer2(s, frame, flags);
/*
 * init_input_stream(): opens the decoder for one input stream (when
 * decoding is needed), installing the hwaccel-aware get_format/get_buffer
 * callbacks, and resets the stream's pts/dts bookkeeping.
 * On failure a human-readable message is written into error[].
 * NOTE(review): numbered excerpt with interior lines elided (e.g. the
 * "if (!codec)" head before 2532 and the ret<0 return path after 2557).
 */
2524 static int init_input_stream(int ist_index, char *error, int error_len)
2527     InputStream *ist = input_streams[ist_index];
2529     if (ist->decoding_needed) {
2530         AVCodec *codec = ist->dec;
/* Decoder lookup failed earlier; report which codec is missing. */
2532             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2533                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2534             return AVERROR(EINVAL);
/* Hook the hwaccel negotiation/allocation callbacks into the decoder. */
2537         ist->dec_ctx->opaque                = ist;
2538         ist->dec_ctx->get_format            = get_format;
2539         ist->dec_ctx->get_buffer2           = get_buffer;
2540         ist->dec_ctx->thread_safe_callbacks = 1;
2542         av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
/* DVB subtitles used for output: enable end-display-time computation. */
2543         if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2544            (ist->decoding_needed & DECODING_FOR_OST)) {
2545             av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2546             if (ist->decoding_needed & DECODING_FOR_FILTER)
2547                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
/* Default to automatic thread count unless the user chose one. */
2550         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2551             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2552         if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2553             if (ret == AVERROR_EXPERIMENTAL)
2554                 abort_codec_experimental(codec, 0);
2556             snprintf(error, error_len,
2557                      "Error while opening decoder for input stream "
2559                     ist->file_index, ist->st->index, av_err2str(ret));
/* Any decoder option left unconsumed is a fatal user error. */
2562         assert_avoptions(ist->decoder_opts);
/* Timestamps are recomputed from the first decoded packet. */
2565     ist->next_pts = AV_NOPTS_VALUE;
2566     ist->next_dts = AV_NOPTS_VALUE;
2571 static InputStream *get_input_stream(OutputStream *ost)
2573 if (ost->source_index >= 0)
2574 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values, ascending order.
 *
 * @param a  pointer to the first int64_t
 * @param b  pointer to the second int64_t
 * @return -1, 0 or +1 as *a is less than, equal to or greater than *b
 *         (three-way compare written without subtraction to avoid
 *         signed overflow on large magnitudes)
 *
 * NOTE(review): the excerpt was a garbled numbered listing (embedded line
 * numbers, missing braces); reconstructed into valid C, logic unchanged.
 */
static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
    return va < vb ? -1 : va > vb ? +1 : 0;
}
/*
 * init_output_stream(): opens the encoder for one output stream (when
 * encoding is needed), propagating the subtitle header from the source
 * decoder, applying option defaults, and syncing the muxer-visible
 * AVStream codec context/time base with the encoder context.
 * On failure a human-readable message is written into error[].
 * NOTE(review): numbered excerpt with interior lines elided (e.g. the
 * "if (ist)" body around 2593-2594 and several closing braces).
 */
2584 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2588     if (ost->encoding_needed) {
2589         AVCodec      *codec = ost->enc;
2590         AVCodecContext *dec = NULL;
2593         if ((ist = get_input_stream(ost)))
/* ASS subtitle code requires a NUL-terminated header buffer. */
2595         if (dec && dec->subtitle_header) {
2596             /* ASS code assumes this buffer is null terminated so add extra byte. */
2597             ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2598             if (!ost->enc_ctx->subtitle_header)
2599                 return AVERROR(ENOMEM);
2600             memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2601             ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2603         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2604             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2605         av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
/* Default audio bitrate of 128k when the user specified none. */
2606         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2608             !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2609             !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2610             av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2612         if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2613             if (ret == AVERROR_EXPERIMENTAL)
2614                 abort_codec_experimental(codec, 1);
2615             snprintf(error, error_len,
2616                      "Error while opening encoder for output stream #%d:%d - "
2617                      "maybe incorrect parameters such as bit_rate, rate, width or height",
2618                     ost->file_index, ost->index);
/* Fixed-frame-size audio encoders need the buffersink to emit exactly
 * frame_size samples per frame. */
2621         if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2622             !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2623             av_buffersink_set_frame_size(ost->filter->filter,
2624                                          ost->enc_ctx->frame_size);
2625         assert_avoptions(ost->encoder_opts);
/* A sub-1000 bitrate almost always means the user meant kbits/s. */
2626         if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2627             av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2628                                          " It takes bits/s as argument, not kbits/s\n");
/* Mirror the encoder context into the (deprecated) AVStream.codec that
 * the muxer still reads. */
2630         ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2632             av_log(NULL, AV_LOG_FATAL,
2633                    "Error initializing the output stream codec context.\n");
2637         // copy timebase while removing common factors
2638         ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2639         ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream-copy path: only re-apply user options and copy the time base. */
2641         ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2643             av_log(NULL, AV_LOG_FATAL,
2644                    "Error setting up codec context options.\n");
2647         // copy timebase while removing common factors
2648         ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
/*
 * parse_forced_key_frames(): parses the -force_key_frames argument — a
 * comma-separated list of timestamps and/or the "chapters[+offset]"
 * keyword — into a sorted array of keyframe PTS values (in the encoder
 * time base) stored on the output stream.
 * NOTE(review): numbered excerpt; interior lines elided (counting of
 * commas into n around 2661-2662, allocation check body, loop advance).
 */
2654 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2655                                     AVCodecContext *avctx)
2658     int n = 1, i, size, index = 0;
/* First pass: count entries (commas) to size the pts array. */
2661     for (p = kf; *p; p++)
2665     pts = av_malloc_array(size, sizeof(*pts));
2667         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
/* Second pass: parse each comma-separated token. */
2672     for (i = 0; i < n; i++) {
2673         char *next = strchr(p, ',');
/* "chapters[+offset]" expands to one keyframe per chapter start. */
2678         if (!memcmp(p, "chapters", 8)) {
2680             AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow the array by (nb_chapters - 1): the token itself already counted 1. */
2683             if (avf->nb_chapters > INT_MAX - size ||
2684                 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2686                 av_log(NULL, AV_LOG_FATAL,
2687                        "Could not allocate forced key frames array.\n");
/* Optional "+offset" after "chapters" shifts every chapter keyframe. */
2690             t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2691             t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2693             for (j = 0; j < avf->nb_chapters; j++) {
2694                 AVChapter *c = avf->chapters[j];
2695                 av_assert1(index < size);
2696                 pts[index++] = av_rescale_q(c->start, c->time_base,
2697                                             avctx->time_base) + t;
/* Plain timestamp token. */
2702             t = parse_time_or_die("force_key_frames", p, 1);
2703             av_assert1(index < size);
2704             pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2711     av_assert0(index == size);
/* Keep the forced keyframe list in ascending PTS order for lookup. */
2712     qsort(pts, size, sizeof(*pts), compare_int64);
2713     ost->forced_kf_count = size;
2714     ost->forced_kf_pts   = pts;
/*
 * report_new_stream(): warns (once per stream) when a packet arrives for
 * an elementary stream that appeared after playback started — common in
 * MPEG-TS — since such streams are not mapped to any output.
 * NOTE(review): numbered excerpt; the early-return body after 2722 is
 * elided.
 */
2717 static void report_new_stream(int input_index, AVPacket *pkt)
2719     InputFile *file = input_files[input_index];
2720     AVStream *st = file->ctx->streams[pkt->stream_index];
/* Already warned about this stream index (or a higher one): stay quiet. */
2722     if (pkt->stream_index < file->nb_streams_warn)
2724     av_log(file->ctx, AV_LOG_WARNING,
2725            "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2726            av_get_media_type_string(st->codec->codec_type),
2727            input_index, pkt->stream_index,
2728            pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest stream index warned about so far. */
2729     file->nb_streams_warn = pkt->stream_index + 1;
/*
 * set_encoder_id(): writes an "encoder" metadata tag ("Lavc<version>
 * <codec name>") on the output stream unless the user already set one,
 * or a bitexact flag (on the muxer or the encoder) requests reproducible
 * output, in which case the un-versioned "Lavc " prefix is used.
 * NOTE(review): numbered excerpt; the early return after 2741 and the
 * "if (e)" heads before 2746/2753 are elided.
 */
2732 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2734     AVDictionaryEntry *e;
2736     uint8_t *encoder_string;
2737     int encoder_string_len;
2738     int format_flags = 0;
2739     int codec_flags = 0;
/* A user-supplied encoder tag always wins. */
2741     if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
/* Evaluate -fflags on the muxer to detect +bitexact. */
2744     e = av_dict_get(of->opts, "fflags", NULL, 0);
2746         const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2749         av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
/* Evaluate -flags on the encoder to detect +bitexact there too. */
2751     e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2753         const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2756         av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2759     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2760     encoder_string     = av_mallocz(encoder_string_len);
2761     if (!encoder_string)
/* Full ident only when neither side requested bitexact output. */
2764     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2765         av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2767         av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2768     av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL: the dict takes ownership of encoder_string. */
2769     av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2770                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
/*
 * transcode_init(): one-time setup before the main transcode loop.
 * Resolves filtergraph output sources, initializes framerate emulation,
 * derives encoder parameters for every output stream (stream-copy and
 * encode paths), opens encoders and decoders, discards unused programs,
 * writes output file headers, and logs the stream mapping.
 * Returns 0 on success or a negative AVERROR (after logging error[]).
 * NOTE(review): numbered excerpt with many interior lines elided
 * (closing braces, several condition heads, goto targets); comments
 * below describe only what the visible lines establish.
 */
2773 static int transcode_init(void)
2775     int ret = 0, i, j, k;
2776     AVFormatContext *oc;
2779     char error[1024] = {0};
/* Attach each complex-filtergraph output to the input stream feeding it,
 * so downstream code can treat it like a mapped stream. */
2782     for (i = 0; i < nb_filtergraphs; i++) {
2783         FilterGraph *fg = filtergraphs[i];
2784         for (j = 0; j < fg->nb_outputs; j++) {
2785             OutputFilter *ofilter = fg->outputs[j];
2786             if (!ofilter->ost || ofilter->ost->source_index >= 0)
2788             if (fg->nb_inputs != 1)
2790             for (k = nb_input_streams-1; k >= 0 ; k--)
2791                 if (fg->inputs[0]->ist == input_streams[k])
2793             ofilter->ost->source_index = k;
2797     /* init framerate emulation */
2798     for (i = 0; i < nb_input_files; i++) {
2799         InputFile *ifile = input_files[i];
2800         if (ifile->rate_emu)
2801             for (j = 0; j < ifile->nb_streams; j++)
2802                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2805     /* for each output stream, we compute the right encoding parameters */
2806     for (i = 0; i < nb_output_streams; i++) {
2807         AVCodecContext *enc_ctx;
2808         AVCodecContext *dec_ctx = NULL;
2809         ost = output_streams[i];
2810         oc  = output_files[ost->file_index]->ctx;
2811         ist = get_input_stream(ost);
2813         if (ost->attachment_filename)
/* Stream copy writes straight into the muxer-visible context. */
2816         enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2819             dec_ctx = ist->dec_ctx;
2821             ost->st->disposition          = ist->st->disposition;
2822             enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2823             enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* No source stream: mark as default if it is the only A/V stream of its
 * type in the output file. */
2825             for (j=0; j<oc->nb_streams; j++) {
2826                 AVStream *st = oc->streams[j];
2827                 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2830             if (j == oc->nb_streams)
2831                 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2832                     ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream-copy parameter propagation ---- */
2835         if (ost->stream_copy) {
2837             uint64_t extra_size;
2839             av_assert0(ist && !ost->filter);
2841             extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2843             if (extra_size > INT_MAX) {
2844                 return AVERROR(EINVAL);
2847             /* if stream_copy is selected, no need to decode or encode */
2848             enc_ctx->codec_id   = dec_ctx->codec_id;
2849             enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the source codec tag only when the output container can accept it. */
2851             if (!enc_ctx->codec_tag) {
2852                 unsigned int codec_tag;
2853                 if (!oc->oformat->codec_tag ||
2854                      av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2855                      !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2856                     enc_ctx->codec_tag = dec_ctx->codec_tag;
2859             enc_ctx->bit_rate       = dec_ctx->bit_rate;
2860             enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2861             enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2862             enc_ctx->field_order    = dec_ctx->field_order;
2863             if (dec_ctx->extradata_size) {
2864                 enc_ctx->extradata      = av_mallocz(extra_size);
2865                 if (!enc_ctx->extradata) {
2866                     return AVERROR(ENOMEM);
2868                 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2870             enc_ctx->extradata_size= dec_ctx->extradata_size;
2871             enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2873             enc_ctx->time_base = ist->st->time_base;
/* Container-specific time-base heuristics for copied streams. */
2875              * Avi is a special case here because it supports variable fps but
2876              * having the fps and timebase differe significantly adds quite some
2879             if(!strcmp(oc->oformat->name, "avi")) {
2880                 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2881                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2882                                && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2883                                && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2885                     enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2886                     enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2887                     enc_ctx->ticks_per_frame = 2;
2888                 } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2889                                  && av_q2d(ist->st->time_base) < 1.0/500
2891                     enc_ctx->time_base = dec_ctx->time_base;
2892                     enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2893                     enc_ctx->time_base.den *= 2;
2894                     enc_ctx->ticks_per_frame = 2;
2896             } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2897                       && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2898                       && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2899                       && strcmp(oc->oformat->name, "f4v")
2901                 if(   copy_tb<0 && dec_ctx->time_base.den
2902                                 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2903                                 && av_q2d(ist->st->time_base) < 1.0/500
2905                     enc_ctx->time_base = dec_ctx->time_base;
2906                     enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode ("tmcd") tracks keep a plausible source time base. */
2909             if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2910                 && dec_ctx->time_base.num < dec_ctx->time_base.den
2911                 && dec_ctx->time_base.num > 0
2912                 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2913                 enc_ctx->time_base = dec_ctx->time_base;
2916             if (!ost->frame_rate.num)
2917                 ost->frame_rate = ist->framerate;
2918             if(ost->frame_rate.num)
2919                 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2921             av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2922                         enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Deep-copy stream-level side data (except an overridden display matrix). */
2924             if (ist->st->nb_side_data) {
2925                 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2926                                                       sizeof(*ist->st->side_data));
2927                 if (!ost->st->side_data)
2928                     return AVERROR(ENOMEM);
2930                 ost->st->nb_side_data = 0;
2931                 for (j = 0; j < ist->st->nb_side_data; j++) {
2932                     const AVPacketSideData *sd_src = &ist->st->side_data[j];
2933                     AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2935                     if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2938                     sd_dst->data = av_malloc(sd_src->size);
2940                         return AVERROR(ENOMEM);
2941                     memcpy(sd_dst->data, sd_src->data, sd_src->size);
2942                     sd_dst->size = sd_src->size;
2943                     sd_dst->type = sd_src->type;
2944                     ost->st->nb_side_data++;
2948             ost->parser = av_parser_init(enc_ctx->codec_id);
/* Per-media-type fixups for copied streams. */
2950             switch (enc_ctx->codec_type) {
2951             case AVMEDIA_TYPE_AUDIO:
2952                 if (audio_volume != 256) {
2953                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2956                 enc_ctx->channel_layout     = dec_ctx->channel_layout;
2957                 enc_ctx->sample_rate        = dec_ctx->sample_rate;
2958                 enc_ctx->channels           = dec_ctx->channels;
2959                 enc_ctx->frame_size         = dec_ctx->frame_size;
2960                 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2961                 enc_ctx->block_align        = dec_ctx->block_align;
2962                 enc_ctx->initial_padding    = dec_ctx->delay;
2963 #if FF_API_AUDIOENC_DELAY
2964                 enc_ctx->delay              = dec_ctx->delay;
/* MP3/AC3 block_align values that confuse muxers are cleared. */
2966                 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2967                     enc_ctx->block_align= 0;
2968                 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2969                     enc_ctx->block_align= 0;
2971             case AVMEDIA_TYPE_VIDEO:
2972                 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
2973                 enc_ctx->width              = dec_ctx->width;
2974                 enc_ctx->height             = dec_ctx->height;
2975                 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
2976                 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2978                         av_mul_q(ost->frame_aspect_ratio,
2979                                  (AVRational){ enc_ctx->height, enc_ctx->width });
2980                     av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2981                            "with stream copy may produce invalid files\n");
2983                 else if (ist->st->sample_aspect_ratio.num)
2984                     sar = ist->st->sample_aspect_ratio;
2986                     sar = dec_ctx->sample_aspect_ratio;
2987                 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2988                 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2989                 ost->st->r_frame_rate = ist->st->r_frame_rate;
2991             case AVMEDIA_TYPE_SUBTITLE:
2992                 enc_ctx->width  = dec_ctx->width;
2993                 enc_ctx->height = dec_ctx->height;
2995             case AVMEDIA_TYPE_UNKNOWN:
2996             case AVMEDIA_TYPE_DATA:
2997             case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encode path: find encoder, wire up filters, derive parameters ---- */
3004                 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3006                 /* should only happen when a default codec is not present. */
3007                 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3008                          avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3009                 ret = AVERROR(EINVAL);
3013             set_encoder_id(output_files[ost->file_index], ost);
/* Build a simple one-in/one-out filtergraph when none exists yet. */
3016                 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3017                  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3019                     fg = init_simple_filtergraph(ist, ost);
3020                     if (configure_filtergraph(fg)) {
3021                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Output frame rate: buffersink > input -r > input r_frame_rate > 25fps. */
3026             if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3027                 if (!ost->frame_rate.num)
3028                     ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3029                 if (ist && !ost->frame_rate.num)
3030                     ost->frame_rate = ist->framerate;
3031                 if (ist && !ost->frame_rate.num)
3032                     ost->frame_rate = ist->st->r_frame_rate;
3033                 if (ist && !ost->frame_rate.num) {
3034                     ost->frame_rate = (AVRational){25, 1};
3035                     av_log(NULL, AV_LOG_WARNING,
3037                            "about the input framerate is available. Falling "
3038                            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3039                            "if you want a different framerate.\n",
3040                            ost->file_index, ost->index);
3042 //                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3043                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3044                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3045                     ost->frame_rate = ost->enc->supported_framerates[idx];
3047                 // reduce frame rate for mpeg4 to be within the spec limits
3048                 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3049                     av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3050                               ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull final encode parameters from the configured filter output link. */
3054             switch (enc_ctx->codec_type) {
3055             case AVMEDIA_TYPE_AUDIO:
3056                 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3057                 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3058                 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3059                 enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3060                 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3062             case AVMEDIA_TYPE_VIDEO:
3063                 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3064                 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3065                     enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3066                 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3067                    && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3068                     av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3069                                                "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Rescale user-supplied forced keyframes into the encoder time base. */
3071                 for (j = 0; j < ost->forced_kf_count; j++)
3072                     ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3074                                                          enc_ctx->time_base);
3076                 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3077                 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3078                 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3079                     ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3080                     av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3081                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
/* Warn about non-4:2:0 output for codecs with poor player support. */
3082                 if (!strncmp(ost->enc->name, "libx264", 7) &&
3083                     enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3084                     ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3085                     av_log(NULL, AV_LOG_WARNING,
3086                            "No pixel format specified, %s for H.264 encoding chosen.\n"
3087                            "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3088                            av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3089                 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3090                     enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3091                     ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3092                     av_log(NULL, AV_LOG_WARNING,
3093                            "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3094                            "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3095                            av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3096                 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3098                 ost->st->avg_frame_rate = ost->frame_rate;
3101                     enc_ctx->width   != dec_ctx->width  ||
3102                     enc_ctx->height  != dec_ctx->height ||
3103                     enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3104                     enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
/* -force_key_frames: expression, "source", or static timestamp list. */
3107                 if (ost->forced_keyframes) {
3108                     if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3109                         ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3110                                             forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3112                             av_log(NULL, AV_LOG_ERROR,
3113                                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3116                         ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3117                         ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3118                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3119                         ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3121                     // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3122                     // parse it only for static kf timings
3123                     } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3124                         parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3128             case AVMEDIA_TYPE_SUBTITLE:
3129                 enc_ctx->time_base = (AVRational){1, 1000};
3130                 if (!enc_ctx->width) {
3131                     enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3132                     enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3135             case AVMEDIA_TYPE_DATA:
/* Apply the -disposition option via a private flags AVOption table. */
3143         if (ost->disposition) {
3144             static const AVOption opts[] = {
3145                 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3146                 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3147                 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3148                 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3149                 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3150                 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3151                 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3152                 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3153                 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3154                 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3155                 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3156                 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3157                 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3158                 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3161             static const AVClass class = {
3163                 .item_name  = av_default_item_name,
3165                 .version    = LIBAVUTIL_VERSION_INT,
3167             const AVClass *pclass = &class;
3169             ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3175     /* open each encoder */
3176     for (i = 0; i < nb_output_streams; i++) {
3177         ret = init_output_stream(output_streams[i], error, sizeof(error));
3182     /* init input streams */
3183     for (i = 0; i < nb_input_streams; i++)
3184         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* On failure, close already-opened encoders before bailing out. */
3185             for (i = 0; i < nb_output_streams; i++) {
3186                 ost = output_streams[i];
3187                 avcodec_close(ost->enc_ctx);
3192     /* discard unused programs */
3193     for (i = 0; i < nb_input_files; i++) {
3194         InputFile *ifile = input_files[i];
3195         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3196             AVProgram *p = ifile->ctx->programs[j];
3197             int discard  = AVDISCARD_ALL;
/* Keep the program if any of its streams is still wanted. */
3199             for (k = 0; k < p->nb_stream_indexes; k++)
3200                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3201                     discard = AVDISCARD_DEFAULT;
3204             p->discard = discard;
3208     /* open files and write file headers */
3209     for (i = 0; i < nb_output_files; i++) {
3210         oc = output_files[i]->ctx;
3211         oc->interrupt_callback = int_cb;
3212         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3213             snprintf(error, sizeof(error),
3214                      "Could not write header for output file #%d "
3215                      "(incorrect codec parameters ?): %s",
3216                      i, av_err2str(ret));
3217             ret = AVERROR(EINVAL);
3220 //         assert_avoptions(output_files[i]->opts);
3221         if (strcmp(oc->oformat->name, "rtp")) {
3227     /* dump the file output parameters - cannot be done before in case
3229     for (i = 0; i < nb_output_files; i++) {
3230         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3233     /* dump the stream mapping */
3234     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3235     for (i = 0; i < nb_input_streams; i++) {
3236         ist = input_streams[i];
3238         for (j = 0; j < ist->nb_filters; j++) {
3239             if (ist->filters[j]->graph->graph_desc) {
3240                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3241                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3242                        ist->filters[j]->name);
3243                 if (nb_filtergraphs > 1)
3244                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3245                 av_log(NULL, AV_LOG_INFO, "\n");
3250     for (i = 0; i < nb_output_streams; i++) {
3251         ost = output_streams[i];
3253         if (ost->attachment_filename) {
3254             /* an attached file */
3255             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3256                    ost->attachment_filename, ost->file_index, ost->index);
3260         if (ost->filter && ost->filter->graph->graph_desc) {
3261             /* output from a complex graph */
3262             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3263             if (nb_filtergraphs > 1)
3264                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3266             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3267                    ost->index, ost->enc ? ost->enc->name : "?");
3271         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3272                input_streams[ost->source_index]->file_index,
3273                input_streams[ost->source_index]->st->index,
3276         if (ost->sync_ist != input_streams[ost->source_index])
3277             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3278                    ost->sync_ist->file_index,
3279                    ost->sync_ist->st->index);
3280         if (ost->stream_copy)
3281             av_log(NULL, AV_LOG_INFO, " (copy)");
/* Encode path: show decoder/encoder vs. codec names ("native" when equal). */
3283             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3284             const AVCodec *out_codec   = ost->enc;
3285             const char *decoder_name   = "?";
3286             const char *in_codec_name  = "?";
3287             const char *encoder_name   = "?";
3288             const char *out_codec_name = "?";
3289             const AVCodecDescriptor *desc;
3292                 decoder_name  = in_codec->name;
3293                 desc = avcodec_descriptor_get(in_codec->id);
3295                     in_codec_name = desc->name;
3296                 if (!strcmp(decoder_name, in_codec_name))
3297                     decoder_name  = "native";
3301                 encoder_name   = out_codec->name;
3302                 desc = avcodec_descriptor_get(out_codec->id);
3304                     out_codec_name = desc->name;
3305                 if (!strcmp(encoder_name, out_codec_name))
3306                     encoder_name = "native";
3309             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3310                    in_codec_name, decoder_name,
3311                    out_codec_name, encoder_name);
3313         av_log(NULL, AV_LOG_INFO, "\n");
3317         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/* Emit the SDP once everything is open, if requested. */
3321     if (sdp_filename || want_sdp) {
3325     transcode_init_done = 1;
3330 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/*
 * need_output(): main-loop termination predicate. A stream no longer
 * needs output once it is finished, its file hit -fs (limit_filesize),
 * or it reached -frames (max_frames) — in which case every stream of
 * that file is closed.
 * NOTE(review): numbered excerpt; the final return statements are elided.
 */
3331 static int need_output(void)
3335     for (i = 0; i < nb_output_streams; i++) {
3336         OutputStream *ost    = output_streams[i];
3337         OutputFile *of       = output_files[ost->file_index];
3338         AVFormatContext *os  = output_files[ost->file_index]->ctx;
3340         if (ost->finished ||
3341             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* Frame limit reached: close all sibling streams of the same file. */
3343         if (ost->frame_number >= ost->max_frames) {
3345             for (j = 0; j < of->ctx->nb_streams; j++)
3346                 close_output_stream(output_streams[of->ost_index + j]);
3357  * Select the output stream to process.
3359  * @return  selected output stream, or NULL if none available
/*
 * choose_output(): picks the unfinished output stream with the smallest
 * rescaled DTS, so muxing proceeds in roughly interleaved order; a
 * stream marked unavailable yields NULL so the caller waits for input.
 * NOTE(review): numbered excerpt; the update of opts_min and the final
 * "return ost_min;" are elided.
 */
3361 static OutputStream *choose_output(void)
3364     int64_t opts_min = INT64_MAX;
3365     OutputStream *ost_min = NULL;
3367     for (i = 0; i < nb_output_streams; i++) {
3368         OutputStream *ost = output_streams[i];
/* Compare streams on a common timescale (cur_dts rescaled). */
3369         int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3371         if (!ost->finished && opts < opts_min) {
3373             ost_min  = ost->unavailable ? NULL : ost;
/*
 * Poll stdin for interactive single-key commands while transcoding.
 * Returns AVERROR_EXIT when the user (or a signal) requested shutdown;
 * polls at most once every 100ms.  NOTE(review): listing is elided —
 * the key-read call, several braces and the final return are missing
 * from this view.
 */
3379 static int check_keyboard_interaction(int64_t cur_time)
3382 static int64_t last_time;
/* a pending signal always wins over keyboard handling */
3383 if (received_nb_signals)
3384 return AVERROR_EXIT;
3385 /* read_key() returns 0 on EOF */
/* rate-limit polling to once per 100ms; never poll when daemonized */
3386 if(cur_time - last_time >= 100000 && !run_as_daemon){
3388 last_time = cur_time;
3392 return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity, 's' toggles the QP histogram */
3393 if (key == '+') av_log_set_level(av_log_get_level()+10);
3394 if (key == '-') av_log_set_level(av_log_get_level()-10);
3395 if (key == 's') qp_hist ^= 1;
3398 do_hex_dump = do_pkt_dump = 0;
3399 } else if(do_pkt_dump){
3403 av_log_set_level(AV_LOG_DEBUG);
/* 'c'/'C': read a "<target> <time> <command> [<arg>]" line and forward
   it to the filtergraphs ('c' targets one filter, 'C' all matching) */
3405 if (key == 'c' || key == 'C'){
3406 char buf[4096], target[64], command[256], arg[256] = {0};
3409 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3411 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3416 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3417 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3418 target, time, command, arg);
3419 for (i = 0; i < nb_filtergraphs; i++) {
3420 FilterGraph *fg = filtergraphs[i];
3423 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3424 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3425 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3426 } else if (key == 'c') {
3427 fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3428 ret = AVERROR_PATCHWELCOME;
/* queued variant: command takes effect at the given time */
3430 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3432 fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3437 av_log(NULL, AV_LOG_ERROR,
3438 "Parse error, at least 3 arguments were expected, "
3439 "only %d given in string '%s'\n", n, buf);
/* 'd'/'D': cycle ('d') or prompt for ('D') codec debug flags */
3442 if (key == 'd' || key == 'D'){
3445 debug = input_streams[0]->st->codec->debug<<1;
3446 if(!debug) debug = 1;
3447 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3453 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3457 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3458 fprintf(stderr,"error parsing debug value\n");
/* propagate the new debug flags to every decoder and encoder */
3460 for(i=0;i<nb_input_streams;i++) {
3461 input_streams[i]->st->codec->debug = debug;
3463 for(i=0;i<nb_output_streams;i++) {
3464 OutputStream *ost = output_streams[i];
3465 ost->enc_ctx->debug = debug;
3467 if(debug) av_log_set_level(AV_LOG_DEBUG);
3468 fprintf(stderr,"debug=%d\n", debug);
/* '?' / unknown key: print the interactive help */
3471 fprintf(stderr, "key function\n"
3472 "? show this help\n"
3473 "+ increase verbosity\n"
3474 "- decrease verbosity\n"
3475 "c Send command to first matching filter supporting it\n"
3476 "C Send/Que command to all matching filters\n"
3477 "D cycle through available debug modes\n"
3478 "h dump packets/hex press to cycle through the 3 states\n"
3480 "s Show QP histogram\n"
/*
 * Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread over f->in_thread_queue.  On fatal
 * read error or EOF the error is published to the receiving side via
 * av_thread_message_queue_set_err_recv().  NOTE(review): listing is
 * elided — the InputFile *f = arg assignment, the read loop and the
 * sleep/retry on EAGAIN are not visible here.
 */
3487 static void *input_thread(void *arg)
/* non-blocking sends only when the file was flagged non_blocking */
3490 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3495 ret = av_read_frame(f->ctx, &pkt);
3497 if (ret == AVERROR(EAGAIN)) {
3502 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* make the packet's data independent before handing it off */
3505 av_dup_packet(&pkt);
3506 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* queue full in non-blocking mode: warn, then retry blocking */
3507 if (flags && ret == AVERROR(EAGAIN)) {
3509 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3510 av_log(f->ctx, AV_LOG_WARNING,
3511 "Thread message queue blocking; consider raising the "
3512 "thread_queue_size option (current value: %d)\n",
3513 f->thread_queue_size);
/* send failed: report (unless plain EOF), drop the packet, and let the
   receiver see the error */
3516 if (ret != AVERROR_EOF)
3517 av_log(f->ctx, AV_LOG_ERROR,
3518 "Unable to send packet to main thread: %s\n",
3520 av_free_packet(&pkt);
3521 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/*
 * Shut down and reap all demuxer threads: signal EOF to the sending
 * side, drain and free any packets still queued, join the thread and
 * free the queue itself.
 */
3529 static void free_input_threads(void)
3533 for (i = 0; i < nb_input_files; i++) {
3534 InputFile *f = input_files[i];
/* nothing to do for files that never got a thread/queue */
3537 if (!f || !f->in_thread_queue)
3539 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* drain leftover packets so their payloads are not leaked */
3540 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3541 av_free_packet(&pkt);
3543 pthread_join(f->thread, NULL);
3545 av_thread_message_queue_free(&f->in_thread_queue);
/*
 * Start one demuxer thread per input file (skipped entirely when there
 * is a single input).  Returns 0 on success or a negative AVERROR.
 */
3549 static int init_input_threads(void)
/* single input: the main loop reads directly, no threads needed */
3553 if (nb_input_files == 1)
3556 for (i = 0; i < nb_input_files; i++) {
3557 InputFile *f = input_files[i];
/* seekable I/O (and non-lavfi inputs without pb) may block; mark the
   file so input_thread() uses non-blocking queue sends */
3559 if (f->ctx->pb ? !f->ctx->pb->seekable :
3560 strcmp(f->ctx->iformat->name, "lavfi"))
3561 f->non_blocking = 1;
3562 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3563 f->thread_queue_size, sizeof(AVPacket));
3567 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3568 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3569 av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno, convert to AVERROR */
3570 return AVERROR(ret);
/*
 * Fetch the next packet for a threaded input file from its message
 * queue.  NOTE(review): the condition selecting NONBLOCK vs blocking
 * mode is on an elided line (presumably f->non_blocking — confirm).
 */
3576 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3578 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3580 AV_THREAD_MESSAGE_NONBLOCK : 0);
/*
 * Read the next packet of an input file, honoring -re style rate
 * limiting: when the input is being throttled, EAGAIN is returned
 * until wall-clock time catches up with the stream dts.  Falls back to
 * the threaded queue with multiple inputs, plain av_read_frame()
 * otherwise.
 */
3584 static int get_input_packet(InputFile *f, AVPacket *pkt)
3588 for (i = 0; i < f->nb_streams; i++) {
3589 InputStream *ist = input_streams[f->ist_index + i];
/* compare stream position (dts in microseconds) against elapsed
   wall-clock time since the stream started */
3590 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3591 int64_t now = av_gettime_relative() - ist->start;
3593 return AVERROR(EAGAIN);
3598 if (nb_input_files > 1)
3599 return get_input_packet_mt(f, pkt);
3601 return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently flagged unavailable
 * (i.e. some input reported EAGAIN this iteration). */
3604 static int got_eagain(void)
3607 for (i = 0; i < nb_output_streams; i++)
3608 if (output_streams[i]->unavailable)
/* Clear the per-file eagain and per-stream unavailable flags before
 * the next transcode iteration. */
3613 static void reset_eagain(void)
3616 for (i = 0; i < nb_input_files; i++)
3617 input_files[i]->eagain = 0;
3618 for (i = 0; i < nb_output_streams; i++)
3619 output_streams[i]->unavailable = 0;
3624 * - 0 -- one packet was read and processed
3625 * - AVERROR(EAGAIN) -- no packets were available for selected file,
3626 * this function should be called again
3627 * - AVERROR_EOF -- this function should not be called again
/*
 * Read one packet from the given input file, fix up its timestamps
 * (start-time correction, wrap correction, ts_offset, ts_scale,
 * discontinuity handling) and feed it to process_input_packet().
 * NOTE(review): listing is elided — several declarations, braces and
 * `continue`/`return` lines are missing from this view.
 */
3629 static int process_input(int file_index)
3631 InputFile *ifile = input_files[file_index];
3632 AVFormatContext *is;
3638 ret = get_input_packet(ifile, &pkt);
3640 if (ret == AVERROR(EAGAIN)) {
3645 if (ret != AVERROR_EOF) {
3646 print_error(is->filename, ret);
/* EOF: flush every decoder of this file, then finish the outputs fed
   directly (stream copy / subtitles) by its streams */
3651 for (i = 0; i < ifile->nb_streams; i++) {
3652 ist = input_streams[ifile->ist_index + i];
3653 if (ist->decoding_needed) {
3654 ret = process_input_packet(ist, NULL);
3659 /* mark all outputs that don't go through lavfi as finished */
3660 for (j = 0; j < nb_output_streams; j++) {
3661 OutputStream *ost = output_streams[j];
3663 if (ost->source_index == ifile->ist_index + i &&
3664 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3665 finish_output_stream(ost);
3669 ifile->eof_reached = 1;
3670 return AVERROR(EAGAIN);
3676 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3677 is->streams[pkt.stream_index]);
3679 /* the following test is needed in case new streams appear
3680 dynamically in stream : we ignore them */
3681 if (pkt.stream_index >= ifile->nb_streams) {
3682 report_new_stream(file_index, &pkt);
3683 goto discard_packet;
3686 ist = input_streams[ifile->ist_index + pkt.stream_index];
3688 ist->data_size += pkt.size;
3692 goto discard_packet;
/* verbose demuxer trace of the raw packet timestamps */
3695 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3696 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3697 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3698 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3699 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3700 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3701 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3702 av_ts2str(input_files[ist->file_index]->ts_offset),
3703 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* --- timestamp wrap correction (pts_wrap_bits < 64) --- */
3706 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3707 int64_t stime, stime2;
3708 // Correcting starttime based on the enabled streams
3709 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3710 // so we instead do it here as part of discontinuity handling
3711 if ( ist->next_dts == AV_NOPTS_VALUE
3712 && ifile->ts_offset == -is->start_time
3713 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3714 int64_t new_start_time = INT64_MAX;
/* take the earliest start time among the streams actually kept */
3715 for (i=0; i<is->nb_streams; i++) {
3716 AVStream *st = is->streams[i];
3717 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3719 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3721 if (new_start_time > is->start_time) {
3722 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3723 ifile->ts_offset = -new_start_time;
3727 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3728 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3729 ist->wrap_correction_done = 1;
/* a dts/pts beyond half the wrap range is treated as pre-wrap and
   shifted down; correction stays pending until both are in range */
3731 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3732 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3733 ist->wrap_correction_done = 0;
3735 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3736 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3737 ist->wrap_correction_done = 0;
3741 /* add the stream-global side data to the first packet */
3742 if (ist->nb_packets == 1) {
3743 if (ist->st->nb_side_data)
3744 av_packet_split_side_data(&pkt);
3745 for (i = 0; i < ist->st->nb_side_data; i++) {
3746 AVPacketSideData *src_sd = &ist->st->side_data[i];
/* don't duplicate side data the packet already carries */
3749 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
/* autorotate handles the display matrix via filters instead */
3751 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3754 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3758 memcpy(dst_data, src_sd->data, src_sd->size);
/* --- apply the per-file ts_offset and per-stream ts_scale --- */
3762 if (pkt.dts != AV_NOPTS_VALUE)
3763 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3764 if (pkt.pts != AV_NOPTS_VALUE)
3765 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3767 if (pkt.pts != AV_NOPTS_VALUE)
3768 pkt.pts *= ist->ts_scale;
3769 if (pkt.dts != AV_NOPTS_VALUE)
3770 pkt.dts *= ist->ts_scale;
/* --- inter-file discontinuity: first dts of this stream vs the last
   timestamp seen for the file (AVFMT_TS_DISCONT formats only) --- */
3772 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3773 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3774 pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3775 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3776 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3777 int64_t delta = pkt_dts - ifile->last_ts;
3778 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3779 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3780 ifile->ts_offset -= delta;
3781 av_log(NULL, AV_LOG_DEBUG,
3782 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3783 delta, ifile->ts_offset);
3784 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3785 if (pkt.pts != AV_NOPTS_VALUE)
3786 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* --- in-stream discontinuity: packet dts vs the predicted next_dts --- */
3790 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3791 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3792 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3794 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3795 int64_t delta = pkt_dts - ist->next_dts;
3796 if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* DISCONT formats: fold large jumps into ts_offset */
3797 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3798 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3799 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3800 ifile->ts_offset -= delta;
3801 av_log(NULL, AV_LOG_DEBUG,
3802 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3803 delta, ifile->ts_offset);
3804 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3805 if (pkt.pts != AV_NOPTS_VALUE)
3806 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* otherwise: drop timestamps that exceed the error threshold */
3809 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3810 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3811 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3812 pkt.dts = AV_NOPTS_VALUE;
3814 if (pkt.pts != AV_NOPTS_VALUE){
3815 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3816 delta = pkt_pts - ist->next_dts;
3817 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3818 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3819 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3820 pkt.pts = AV_NOPTS_VALUE;
/* remember the last dts of this file for inter-file handling above */
3826 if (pkt.dts != AV_NOPTS_VALUE)
3827 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3830 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3831 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3832 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3833 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3834 av_ts2str(input_files[ist->file_index]->ts_offset),
3835 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* keep sub2video streams alive, then hand the packet to the decoder
   or the stream-copy path */
3838 sub2video_heartbeat(ist, pkt.pts);
3840 process_input_packet(ist, &pkt);
3843 av_free_packet(&pkt);
3849 * Perform a step of transcoding for the specified filter graph.
3851 * @param[in] graph filter graph to consider
3852 * @param[out] best_ist input stream where a frame would allow to continue
3853 * @return 0 for success, <0 for error
/*
 * Asks the graph for its oldest pending frame; on success reaps the
 * filter outputs, on EOF closes the graph's output streams, and on
 * EAGAIN selects the input whose buffersrc has failed the most
 * requests as the best stream to feed next.
 */
3855 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3858 int nb_requests, nb_requests_max = 0;
3859 InputFilter *ifilter;
3863 ret = avfilter_graph_request_oldest(graph->graph);
3865 return reap_filters(0);
3867 if (ret == AVERROR_EOF) {
/* flush: reap with the eof flag, then close all graph outputs */
3868 ret = reap_filters(1);
3869 for (i = 0; i < graph->nb_outputs; i++)
3870 close_output_stream(graph->outputs[i]->ost);
3873 if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the starved input with the most failed requests */
3876 for (i = 0; i < graph->nb_inputs; i++) {
3877 ifilter = graph->inputs[i];
/* skip inputs whose file is stalled or already finished */
3879 if (input_files[ist->file_index]->eagain ||
3880 input_files[ist->file_index]->eof_reached)
3882 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3883 if (nb_requests > nb_requests_max) {
3884 nb_requests_max = nb_requests;
/* no feedable input found: mark the graph's outputs unavailable */
3890 for (i = 0; i < graph->nb_outputs; i++)
3891 graph->outputs[i]->ost->unavailable = 1;
3897 * Run a single step of transcoding.
3899 * @return 0 for success, <0 for error
/*
 * One iteration of the main loop: choose the neediest output stream,
 * find the input stream that should feed it (via its filtergraph when
 * present), pull one packet from that input, then reap filtered
 * frames.  NOTE(review): listing is elided — NULL checks after
 * choose_output()/transcode_from_filter() are not visible here.
 */
3901 static int transcode_step(void)
3907 ost = choose_output();
3914 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* filtered stream: let the graph pick the best input to feed */
3919 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* unfiltered (stream copy) path: feed directly from the source */
3924 av_assert0(ost->source_index >= 0);
3925 ist = input_streams[ost->source_index];
3928 ret = process_input(ist->file_index);
3929 if (ret == AVERROR(EAGAIN)) {
3930 if (input_files[ist->file_index]->eagain)
3931 ost->unavailable = 1;
/* EOF from an input is not an error for the step as a whole */
3936 return ret == AVERROR_EOF ? 0 : ret;
3938 return reap_filters(0);
3942 * The following code is the main loop of the file converter
/*
 * Top-level driver: initialize, run transcode_step() until no output
 * is wanted or the user quits, flush decoders, write trailers, close
 * codecs and free per-stream resources.  NOTE(review): listing is
 * elided — return paths, some braces and the `fail:` label are not
 * visible here.
 */
3944 static int transcode(void)
3947 AVFormatContext *os;
3950 int64_t timer_start;
3952 ret = transcode_init();
3956 if (stdin_interaction) {
3957 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3960 timer_start = av_gettime_relative();
3963 if ((ret = init_input_threads()) < 0)
/* ---- main loop: run until SIGTERM, 'q', or no stream needs output ---- */
3967 while (!received_sigterm) {
3968 int64_t cur_time= av_gettime_relative();
3970 /* if 'q' pressed, exits */
3971 if (stdin_interaction)
3972 if (check_keyboard_interaction(cur_time) < 0)
3975 /* check if there's any stream where output is still needed */
3976 if (!need_output()) {
3977 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3981 ret = transcode_step();
3983 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3987 av_strerror(ret, errbuf, sizeof(errbuf));
3989 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3994 /* dump report by using the output first video and audio streams */
3995 print_report(0, timer_start, cur_time);
3998 free_input_threads();
4001 /* at the end of stream, we must flush the decoder buffers */
4002 for (i = 0; i < nb_input_streams; i++) {
4003 ist = input_streams[i];
4004 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4005 process_input_packet(ist, NULL);
4012 /* write the trailer if needed and close file */
4013 for (i = 0; i < nb_output_files; i++) {
4014 os = output_files[i]->ctx;
4015 av_write_trailer(os);
4018 /* dump report by using the first video and audio streams */
4019 print_report(1, timer_start, av_gettime_relative());
4021 /* close each encoder */
4022 for (i = 0; i < nb_output_streams; i++) {
4023 ost = output_streams[i];
4024 if (ost->encoding_needed) {
4025 av_freep(&ost->enc_ctx->stats_in);
4029 /* close each decoder */
4030 for (i = 0; i < nb_input_streams; i++) {
4031 ist = input_streams[i];
4032 if (ist->decoding_needed) {
4033 avcodec_close(ist->dec_ctx);
4034 if (ist->hwaccel_uninit)
4035 ist->hwaccel_uninit(ist->dec_ctx);
/* error/cleanup path: also reached on failure before the loop */
4044 free_input_threads();
4047 if (output_streams) {
4048 for (i = 0; i < nb_output_streams; i++) {
4049 ost = output_streams[i];
4052 fclose(ost->logfile);
4053 ost->logfile = NULL;
4055 av_freep(&ost->forced_kf_pts);
4056 av_freep(&ost->apad);
4057 av_freep(&ost->disposition);
4058 av_dict_free(&ost->encoder_opts);
4059 av_dict_free(&ost->sws_dict);
4060 av_dict_free(&ost->swr_opts);
4061 av_dict_free(&ost->resample_opts);
4062 av_dict_free(&ost->bsf_args);
/*
 * Return this process's consumed user CPU time in microseconds:
 * getrusage() where available, GetProcessTimes() on Windows (100ns
 * units divided by 10), otherwise fall back to wall-clock time.
 */
4070 static int64_t getutime(void)
4073 struct rusage rusage;
4075 getrusage(RUSAGE_SELF, &rusage);
4076 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4077 #elif HAVE_GETPROCESSTIMES
4079 FILETIME c, e, k, u;
4080 proc = GetCurrentProcess();
4081 GetProcessTimes(proc, &c, &e, &k, &u);
4082 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4084 return av_gettime_relative();
/*
 * Return the peak memory usage of this process in bytes: ru_maxrss
 * (kilobytes, hence *1024) via getrusage(), PeakPagefileUsage via
 * GetProcessMemoryInfo() on Windows.  NOTE(review): the fallback
 * branch for other platforms is on elided lines.
 */
4088 static int64_t getmaxrss(void)
4090 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4091 struct rusage rusage;
4092 getrusage(RUSAGE_SELF, &rusage);
4093 return (int64_t)rusage.ru_maxrss * 1024;
4094 #elif HAVE_GETPROCESSMEMORYINFO
4096 PROCESS_MEMORY_COUNTERS memcounters;
4097 proc = GetCurrentProcess();
4098 memcounters.cb = sizeof(memcounters);
4099 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4100 return memcounters.PeakPagefileUsage;
/* No-op av_log callback installed in -d (daemon) mode to silence all
 * library logging.  Body is empty (elided in this listing). */
4106 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/*
 * Program entry point: register libraries, parse options, open all
 * inputs/outputs, run transcode() and report benchmark/error stats.
 * Exits via exit_program() (255 when interrupted by signals).
 */
4110 int main(int argc, char **argv)
4115 register_exit(ffmpeg_cleanup);
4117 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4119 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4120 parse_loglevel(argc, argv, options);
/* -d as the first argument: daemonize and mute library logging */
4122 if(argc>1 && !strcmp(argv[1], "-d")){
4124 av_log_set_callback(log_callback_null);
/* register all components before option parsing touches them */
4129 avcodec_register_all();
4131 avdevice_register_all();
4133 avfilter_register_all();
4135 avformat_network_init();
4137 show_banner(argc, argv, options);
4141 /* parse options and open all input/output files */
4142 ret = ffmpeg_parse_options(argc, argv);
4146 if (nb_output_files <= 0 && nb_input_files == 0) {
4148 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4152 /* file converter / grab */
4153 if (nb_output_files <= 0) {
4154 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4158 // if (nb_input_files == 0) {
4159 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* benchmark bracket around the whole transcode */
4163 current_time = ti = getutime();
4164 if (transcode() < 0)
4166 ti = getutime() - ti;
4168 av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4170 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4171 decode_error_stat[0], decode_error_stat[1]);
/* fail hard when the decode-error ratio exceeds -max_error_rate */
4172 if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4175 exit_program(received_nb_signals ? 255 : main_return_code);
4176 return main_return_code;