2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
/* Program identification consumed by the cmdutils banner/version code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
/* Log file for per-frame video statistics (-vstats); opened lazily in
   do_video_stats() and closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
/* Constant names usable inside -force_key_frames expressions.
   NOTE(review): the initializer list is elided in this excerpt. */
114 const char *const forced_keyframes_const_names[] = {
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
/* Runtime flags and frame-accounting counters. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
/* Reference timestamp for update_benchmark() deltas. */
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
/* Scratch buffer for encoded subtitle packets (see do_subtitle_out). */
135 static uint8_t *subtitle_out;
/* Global stream/file tables; non-static, so presumably shared with the
   option-parsing translation unit — confirm against ffmpeg.h. */
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
147 FilterGraph **filtergraphs;
152 /* init terminal so that we can grab keys */
/* Saved terminal attributes, restored by term_exit_sigsafe(). */
153 static struct termios oldtty;
154 static int restore_tty;
158 static void free_input_threads(void);
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
/* Prepare ist->sub2video.frame as a blank RGB32 canvas sized to the decoder
 * dimensions (falling back to the configured sub2video w/h), for subtitle
 * rendering. Returns <0 on allocation failure (error path partly elided in
 * this excerpt). */
166 static int sub2video_get_blank_frame(InputStream *ist)
169 AVFrame *frame = ist->sub2video.frame;
/* Drop any previous buffer before (re)allocating. */
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
/* 32-byte alignment for the new buffer. */
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
/* Zero-fill: RGB32 all-zero is fully transparent black. */
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
/* Blit one palettized bitmap subtitle rectangle into a 32-bit destination
 * canvas of size w x h with stride dst_linesize. Rectangles that are not
 * bitmaps or that fall outside the canvas are warned about and skipped
 * (the early returns are elided in this excerpt). */
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184 uint32_t *pal, *dst2;
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
/* Advance to the rectangle's top-left pixel (4 bytes per RGB32 pixel). */
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
/* data[1] holds the palette: one 32-bit color per source index. */
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
/* Expand each paletted source byte to a full 32-bit pixel. */
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
208 src += r->pict.linesize[0];
/* Push the current sub2video canvas into every filter fed by this input
 * stream, stamping it with the given pts and remembering that pts as
 * last_pts. KEEP_REF lets the same frame be pushed repeatedly. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
214 AVFrame *frame = ist->sub2video.frame;
/* The canvas must have been allocated by sub2video_get_blank_frame(). */
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
/* Render a decoded AVSubtitle onto a fresh blank canvas and push it into the
 * filtergraph(s). With sub == NULL (heartbeat/flush path, handling partly
 * elided here) a blank frame is emitted instead. Timestamps are converted
 * from the AV_TIME_BASE domain into the input stream's time base. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
227 AVFrame *frame = ist->sub2video.frame;
231 int64_t pts, end_pts;
/* start/end_display_time are in ms; scale into stream time base. */
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
/* NULL-sub branch: reuse the previous end pts (condition elided). */
242 pts = ist->sub2video.end_pts;
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
/* Composite every subtitle rectangle onto the canvas. */
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
/* Re-send the current sub2video frame for every subtitle stream in the same
 * input file, so downstream filters (e.g. overlay) are never starved while
 * waiting for the next subtitle packet. pts is in ist's time base. */
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 InputFile *infile = input_files[ist->file_index];
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
/* Skip streams that are not sub2video sources (continue elided). */
271 if (!ist2->sub2video.frame)
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
/* Past its end time (or no canvas yet): refresh with a blank frame. */
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
/* Only re-push if some filter actually failed a frame request. */
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
284 sub2video_push_ref(ist2, pts2);
/* End-of-stream: emit one final blank subtitle frame (unless end_pts already
 * reached INT64_MAX) and push NULL into each buffersrc to signal EOF. */
288 static void sub2video_flush(InputStream *ist)
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 /* end of sub2video hack */
/* Async-signal-safe terminal restore: put back the tty attributes saved in
 * term_init(). The restore_tty guard around tcsetattr is elided here. */
300 static void term_exit_sigsafe(void)
304 tcsetattr (0, TCSANOW, &oldtty);
/* Flush av_log's internal state without printing anything. */
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
/* Signal/exit bookkeeping, written from signal handlers — hence volatile.
   NOTE(review): plain volatile int is not guaranteed atomic across threads;
   this matches upstream FFmpeg practice, not modern <stdatomic.h> style. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
/* Signal handler: record which signal arrived and how many we have seen.
 * After more than 3 signals give up on graceful shutdown and hard-exit
 * (the exit call itself is elided in this excerpt). Only async-signal-safe
 * calls (write) are used. */
321 sigterm_handler(int sig)
323 received_sigterm = sig;
324 received_nb_signals++;
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
334 #if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the same
 * path as POSIX signals via sigterm_handler(). */
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
/* Ctrl+C / Ctrl+Break behave like SIGINT. */
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
/* Busy-wait (sleep call elided) until the main thread flags completion. */
354 while (!ffmpeg_exited) {
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
/* NOTE(review): fragment of term_init() — the function signature and several
 * guard conditions are elided in this excerpt. It switches the controlling
 * terminal to raw-ish mode (so single keypresses can be read) and installs
 * the signal handlers. */
371 if (tcgetattr (0, &tty) == 0) {
/* Disable input translation/flow control; keep output post-processing. */
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
/* No echo, no canonical (line-buffered) mode. */
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
384 tcsetattr (0, TCSANOW, &tty);
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
/* SIGXCPU: CPU-time limit exceeded — treat like a termination request. */
393 signal(SIGXCPU, sigterm_handler);
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 /* read a key without blocking */
/* Returns the next keypress, or a negative/zero value when none is pending.
 * NOTE(review): most of the body (fd_set setup, timeout, the actual read,
 * and return statements) is elided in this excerpt. */
401 static int read_key(void)
/* POSIX path: poll stdin with a zero-ish timeout via select(). */
413 n = select(1, &rfds, NULL, NULL, &tv);
/* Windows path: use PeekNamedPipe when stdin is a pipe, console API otherwise. */
422 # if HAVE_PEEKNAMEDPIPE
424 static HANDLE input_handle;
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
428 is_pipe = !GetConsoleMode(input_handle, &dw);
432 /* When running under a GUI, you will end here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
/* AVIO interrupt callback: abort blocking I/O once we have received more
 * signals than init milestones passed (i.e. any signal after init is done,
 * or a second signal during init). */
452 static int decode_interrupt_cb(void *ctx)
454 return received_nb_signals > transcode_init_done;
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Tear down every global resource before process exit: filtergraphs, output
 * files/streams, input threads/files/streams, the vstats file and the
 * network layer; finally report why we are exiting. Several loop headers,
 * conditions and closing braces are elided in this excerpt. */
459 static void ffmpeg_cleanup(int ret)
/* Report peak memory usage when benchmarking (guard elided). */
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
/* Free every filtergraph, including its per-input/output name strings. */
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
483 av_freep(&filtergraphs[i]);
485 av_freep(&filtergraphs);
487 av_freep(&subtitle_out);
/* Close and free all output files (the muxer contexts). */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
/* Only close the AVIOContext for formats that actually own a file. */
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
501 av_freep(&output_files[i]);
/* Free per-output-stream state: bitstream filter chain, cached frames,
   parser, expressions, option dicts and the encoder context. */
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
510 bsfc = ost->bitstream_filters;
/* Walk the linked list of bitstream filters (loop header elided). */
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
520 av_parser_close(ost->parser);
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
530 av_dict_free(&ost->sws_dict);
532 avcodec_free_context(&ost->enc_ctx);
534 av_freep(&output_streams[i]);
/* Stop demuxer threads before closing the inputs they read from. */
537 free_input_threads();
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
554 avcodec_free_context(&ist->dec_ctx);
556 av_freep(&input_streams[i]);
/* fclose flushes; a failure here can mean lost statistics. */
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
565 av_freep(&vstats_filename);
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
574 avformat_network_deinit();
/* Explain the exit: signal-driven vs. a conversion failure after init. */
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 AVDictionaryEntry *t = NULL;
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
/* Abort if any option in m was not consumed: the first remaining key is
 * reported as unknown at FATAL level (the exit_program call that follows
 * the log is elided in this excerpt). */
595 void assert_avoptions(AVDictionary *m)
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
604 static void abort_codec_experimental(AVCodec *c, int encoder)
/* With -benchmark_all: print the CPU time elapsed since the previous call,
 * labeled by the printf-style fmt/varargs; fmt == NULL only resets the
 * reference time (that branch is elided in this excerpt). */
609 static void update_benchmark(const char *fmt, ...)
611 if (do_benchmark_all) {
612 int64_t t = getutime();
618 vsnprintf(buf, sizeof(buf), fmt, va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
/* Mark every output stream finished: `ost` itself gets `this_stream`,
 * all other streams get `others`. Used to shut the pipeline down after a
 * fatal muxing error. */
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
/* Final muxing step for one packet: copy encoder extradata to the muxer
 * stream, enforce -frames limits, harvest quality/error side data, apply
 * bitstream filters, sanitize PTS/DTS monotonicity, then hand the packet to
 * av_interleaved_write_frame(). Several guards, branches and closing braces
 * are elided in this excerpt — do not restructure without the full source. */
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
/* Streamcopy uses the muxer stream's codec context, encoding the encoder's. */
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
/* Propagate extradata produced by the encoder to the muxer side once. */
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
/* VSYNC_DROP / negative async: discard timestamps entirely. */
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
/* Enforce -frames for non-video (no reordering to worry about). */
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
/* Record per-packet quality/PSNR stats the encoder attached as side data. */
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
/* CFR output: duration is dictated by the frame rate, not the packet. */
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
690 av_packet_split_side_data(pkt);
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
/* BSFs may consume the packet entirely; nothing left to mux. */
697 if (pkt->size == 0 && pkt->side_data_elems == 0)
/* Timestamp sanitation for formats that carry timestamps. */
700 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
701 if (pkt->dts != AV_NOPTS_VALUE &&
702 pkt->pts != AV_NOPTS_VALUE &&
703 pkt->dts > pkt->pts) {
704 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
706 ost->file_index, ost->st->index);
/* Median of {pts, dts, last_mux_dts+1}: sum minus min minus max. */
708 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
709 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
710 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
/* Enforce (strictly, unless TS_NONSTRICT) increasing DTS per stream. */
713 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
714 pkt->dts != AV_NOPTS_VALUE &&
715 ost->last_mux_dts != AV_NOPTS_VALUE) {
716 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
717 if (pkt->dts < max) {
718 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
719 av_log(s, loglevel, "Non-monotonous DTS in output stream "
720 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
721 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
/* With exit_on_error set this is fatal (guard elided). */
723 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
726 av_log(s, loglevel, "changing to %"PRId64". This may result "
727 "in incorrect timestamps in the output file.\n",
/* Keep pts >= dts after clamping dts up to `max`. */
729 if(pkt->pts >= pkt->dts)
730 pkt->pts = FFMAX(pkt->pts, max);
735 ost->last_mux_dts = pkt->dts;
737 ost->data_size += pkt->size;
738 ost->packets_written++;
740 pkt->stream_index = ost->index;
/* -debug_ts trace of what goes into the muxer (guard elided). */
743 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
744 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
745 av_get_media_type_string(ost->enc_ctx->codec_type),
746 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
747 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
752 ret = av_interleaved_write_frame(s, pkt);
/* On muxing failure remember the error and finish every stream. */
754 print_error("av_interleaved_write_frame()", ret);
755 main_return_code = 1;
756 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
758 av_packet_unref(pkt);
/* Mark one output stream's encoder as finished and, if the stream drives the
 * file's duration (guard elided), shrink the file's recording_time so the
 * other streams stop at the same point. */
761 static void close_output_stream(OutputStream *ost)
763 OutputFile *of = output_files[ost->file_index];
765 ost->finished |= ENCODER_FINISHED;
767 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
768 of->recording_time = FFMIN(of->recording_time, end);
/* Return whether encoding may continue for this stream under -t: once the
 * stream's elapsed time reaches the file's recording_time, close it.
 * The return statements are elided in this excerpt. */
772 static int check_recording_time(OutputStream *ost)
774 OutputFile *of = output_files[ost->file_index];
776 if (of->recording_time != INT64_MAX &&
777 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
778 AV_TIME_BASE_Q) >= 0) {
779 close_output_stream(ost);
/* Encode one audio frame and mux the resulting packet(s): fix up the frame
 * pts against sync_opts, call avcodec_encode_audio2(), rescale timestamps to
 * the stream time base and hand off to write_frame(). Guards and the
 * got_packet check are elided in this excerpt. */
785 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
788 AVCodecContext *enc = ost->enc_ctx;
792 av_init_packet(&pkt);
/* Stop if -t recording time has been reached. */
796 if (!check_recording_time(ost))
/* Without a usable pts (or with forced resampling sync) stamp the frame
   at the stream's running sample counter. */
799 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
800 frame->pts = ost->sync_opts;
801 ost->sync_opts = frame->pts + frame->nb_samples;
802 ost->samples_encoded += frame->nb_samples;
803 ost->frames_encoded++;
805 av_assert0(pkt.size || !pkt.data);
806 update_benchmark(NULL);
/* -debug_ts trace of the frame entering the encoder (guard elided). */
808 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
809 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
810 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
811 enc->time_base.num, enc->time_base.den);
814 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
815 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
818 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
/* Encoder time base -> muxer stream time base. */
821 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
824 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
825 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
826 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
827 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
830 write_frame(s, &pkt, ost);
/* Encode one AVSubtitle and mux it. DVB subtitles are encoded twice — one
 * packet to draw, one to clear — hence the nb-iteration loop. Timestamps are
 * shifted by -ss/start_time so check_recording_time() works with -t.
 * Several declarations and guards are elided in this excerpt. */
834 static void do_subtitle_out(AVFormatContext *s,
839 int subtitle_out_max_size = 1024 * 1024;
840 int subtitle_out_size, nb, i;
845 if (sub->pts == AV_NOPTS_VALUE) {
846 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
/* Lazily allocate the shared 1 MiB output buffer. */
855 subtitle_out = av_malloc(subtitle_out_max_size);
857 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
862 /* Note: DVB subtitle need one packet to draw them and one other
863 packet to clear them */
864 /* XXX: signal it in the codec context ? */
865 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
870 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
872 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
873 pts -= output_files[ost->file_index]->start_time;
874 for (i = 0; i < nb; i++) {
/* The encoder may clobber num_rects (clear packet); save and restore. */
875 unsigned save_num_rects = sub->num_rects;
877 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
878 if (!check_recording_time(ost))
882 // start_display_time is required to be 0
883 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
884 sub->end_display_time -= sub->start_display_time;
885 sub->start_display_time = 0;
889 ost->frames_encoded++;
891 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
892 subtitle_out_max_size, sub);
894 sub->num_rects = save_num_rects;
895 if (subtitle_out_size < 0) {
896 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
900 av_init_packet(&pkt);
901 pkt.data = subtitle_out;
902 pkt.size = subtitle_out_size;
903 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
904 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
905 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
906 /* XXX: the pts correction is handled here. Maybe handling
907 it in the codec would be better */
/* 90 kHz units: draw packet at start, clear packet at end (branch elided). */
909 pkt.pts += 90 * sub->start_display_time;
911 pkt.pts += 90 * sub->end_display_time;
914 write_frame(s, &pkt, ost);
/* Core video output path: decide — according to the active vsync mode — how
 * many times to emit (duplicate) or whether to drop the incoming filtered
 * frame, handle forced keyframes, encode via avcodec_encode_video2() (or the
 * legacy RAWPICTURE passthrough), and mux. This excerpt elides many lines
 * (declarations, braces, case labels, some branches); treat the visible text
 * as a partial view and do not restructure without the complete source. */
918 static void do_video_out(AVFormatContext *s,
920 AVFrame *next_picture,
923 int ret, format_video_sync;
925 AVCodecContext *enc = ost->enc_ctx;
926 AVCodecContext *mux_enc = ost->st->codec;
927 int nb_frames, nb0_frames, i;
928 double delta, delta0;
931 InputStream *ist = NULL;
932 AVFilterContext *filter = ost->filter->filter;
934 if (ost->source_index >= 0)
935 ist = input_streams[ost->source_index];
/* Nominal duration of one output frame, in encoder time-base units. */
937 if (filter->inputs[0]->frame_rate.num > 0 &&
938 filter->inputs[0]->frame_rate.den > 0)
939 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
941 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
942 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
/* Prefer the frame's own packet duration when no filter script rewrote it. */
944 if (!ost->filters_script &&
948 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
949 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
/* Flush case (next_picture == NULL, guard elided): predict dup count from
   the median of the last three observed values. */
954 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
955 ost->last_nb0_frames[1],
956 ost->last_nb0_frames[2]);
958 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
959 delta = delta0 + duration;
961 /* by default, we output a single frame */
962 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
/* Resolve VSYNC_AUTO into a concrete sync mode for this muxer/input. */
965 format_video_sync = video_sync_method;
966 if (format_video_sync == VSYNC_AUTO) {
967 if(!strcmp(s->oformat->name, "avi")) {
968 format_video_sync = VSYNC_VFR;
970 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
972 && format_video_sync == VSYNC_CFR
973 && input_files[ist->file_index]->ctx->nb_streams == 1
974 && input_files[ist->file_index]->input_ts_offset == 0) {
975 format_video_sync = VSYNC_VSCFR;
977 if (format_video_sync == VSYNC_CFR && copy_ts) {
978 format_video_sync = VSYNC_VSCFR;
981 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
/* Negative drift (condition partly elided): clip the frame back onto the
   output timeline rather than going backwards. */
985 format_video_sync != VSYNC_PASSTHROUGH &&
986 format_video_sync != VSYNC_DROP) {
988 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
990 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
991 sync_ipts = ost->sync_opts;
/* Per-mode dup/drop decision (several case labels elided). */
996 switch (format_video_sync) {
998 if (ost->frame_number == 0 && delta0 >= 0.5) {
999 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1002 ost->sync_opts = lrint(sync_ipts);
1005 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1006 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1008 } else if (delta < -1.1)
1010 else if (delta > 1.1) {
1011 nb_frames = lrintf(delta);
1013 nb0_frames = lrintf(delta0 - 0.6);
1019 else if (delta > 0.6)
1020 ost->sync_opts = lrint(sync_ipts);
1023 case VSYNC_PASSTHROUGH:
1024 ost->sync_opts = lrint(sync_ipts);
/* Clamp to -frames limit and record the dup count history. */
1031 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1032 nb0_frames = FFMIN(nb0_frames, nb_frames);
1034 memmove(ost->last_nb0_frames + 1,
1035 ost->last_nb0_frames,
1036 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1037 ost->last_nb0_frames[0] = nb0_frames;
1039 if (nb0_frames == 0 && ost->last_dropped) {
1041 av_log(NULL, AV_LOG_VERBOSE,
1042 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1043 ost->frame_number, ost->st->index, ost->last_frame->pts);
1045 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1046 if (nb_frames > dts_error_threshold * 30) {
1047 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1051 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1052 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1054 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1056 /* duplicates frame if needed */
1057 for (i = 0; i < nb_frames; i++) {
1058 AVFrame *in_picture;
1059 av_init_packet(&pkt);
/* The first nb0_frames iterations re-encode the previous frame. */
1063 if (i < nb0_frames && ost->last_frame) {
1064 in_picture = ost->last_frame;
1066 in_picture = next_picture;
1071 in_picture->pts = ost->sync_opts;
1074 if (!check_recording_time(ost))
1076 if (ost->frame_number >= ost->max_frames)
/* Legacy raw-picture passthrough for AVFMT_RAWPICTURE muxers. */
1080 #if FF_API_LAVF_FMT_RAWPICTURE
1081 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1082 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1083 /* raw pictures are written as AVPicture structure to
1084 avoid any copies. We support temporarily the older
1086 if (in_picture->interlaced_frame)
1087 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1089 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1090 pkt.data = (uint8_t *)in_picture;
1091 pkt.size = sizeof(AVPicture);
1092 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1093 pkt.flags |= AV_PKT_FLAG_KEY;
1095 write_frame(s, &pkt, ost);
/* Normal encode path. */
1099 int got_packet, forced_keyframe = 0;
1102 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1103 ost->top_field_first >= 0)
1104 in_picture->top_field_first = !!ost->top_field_first;
1106 if (in_picture->interlaced_frame) {
1107 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1108 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1110 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1112 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1114 in_picture->quality = enc->global_quality;
1115 in_picture->pict_type = 0;
/* Forced keyframes: explicit pts list, expression, or "source". */
1117 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1118 in_picture->pts * av_q2d(enc->time_base) : NAN;
1119 if (ost->forced_kf_index < ost->forced_kf_count &&
1120 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1121 ost->forced_kf_index++;
1122 forced_keyframe = 1;
1123 } else if (ost->forced_keyframes_pexpr) {
1125 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1126 res = av_expr_eval(ost->forced_keyframes_pexpr,
1127 ost->forced_keyframes_expr_const_values, NULL);
1128 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1129 ost->forced_keyframes_expr_const_values[FKF_N],
1130 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1131 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1132 ost->forced_keyframes_expr_const_values[FKF_T],
1133 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1136 forced_keyframe = 1;
1137 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1138 ost->forced_keyframes_expr_const_values[FKF_N];
1139 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1140 ost->forced_keyframes_expr_const_values[FKF_T];
1141 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1144 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1145 } else if ( ost->forced_keyframes
1146 && !strncmp(ost->forced_keyframes, "source", 6)
1147 && in_picture->key_frame==1) {
1148 forced_keyframe = 1;
1151 if (forced_keyframe) {
1152 in_picture->pict_type = AV_PICTURE_TYPE_I;
1153 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1156 update_benchmark(NULL);
1158 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1159 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1160 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1161 enc->time_base.num, enc->time_base.den);
1164 ost->frames_encoded++;
1166 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1167 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1169 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1175 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1176 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1177 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1178 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
/* Non-delaying encoders may omit pts; use our running counter. */
1181 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1182 pkt.pts = ost->sync_opts;
1184 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1187 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1188 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1189 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1190 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1193 frame_size = pkt.size;
1194 write_frame(s, &pkt, ost);
1196 /* if two pass, output log */
1197 if (ost->logfile && enc->stats_out) {
1198 fprintf(ost->logfile, "%s", enc->stats_out);
1204 * For video, number of frames in == number of packets out.
1205 * But there may be reordering, so we can't throw away frames on encoder
1206 * flush, we need to limit them here, before they go into encoder.
1208 ost->frame_number++;
1210 if (vstats_filename && frame_size)
1211 do_video_stats(ost, frame_size);
/* Remember the last frame for possible duplication next call. */
1214 if (!ost->last_frame)
1215 ost->last_frame = av_frame_alloc();
1216 av_frame_unref(ost->last_frame);
1217 if (next_picture && ost->last_frame)
1218 av_frame_ref(ost->last_frame, next_picture);
1220 av_frame_free(&ost->last_frame);
1223 static double psnr(double d)
1225 return -10.0 * log10(d);
/* Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * frame size, instantaneous and average bitrate, picture type) to the
 * -vstats file, opening it on first use. Several lines (enc assignment,
 * error handling, some declarations) are elided in this excerpt. */
1228 static void do_video_stats(OutputStream *ost, int frame_size)
1230 AVCodecContext *enc;
1232 double ti1, bitrate, avg_bitrate;
1234 /* this is executed just the first time do_video_stats is called */
1236 vstats_file = fopen(vstats_filename, "w");
1244 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1245 frame_number = ost->st->nb_frames;
1246 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1247 ost->quality / (float)FF_QP2LAMBDA);
/* PSNR only when the encoder was asked to compute error (FLAG_PSNR). */
1249 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1250 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1252 fprintf(vstats_file,"f_size= %6d ", frame_size);
1253 /* compute pts value */
1254 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Instantaneous bitrate from this frame; average from total bytes / time. */
1258 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1259 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1260 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1261 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1262 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
/* Mark this stream fully finished (encoder and muxer); if the file is in
 * shortest mode (guard elided), finish every stream of the file. */
1266 static void finish_output_stream(OutputStream *ost)
1268 OutputFile *of = output_files[ost->file_index];
1271 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1274 for (i = 0; i < of->ctx->nb_streams; i++)
1275 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1280 * Get and encode new output from any of the filtergraphs, without causing
1283 * @return 0 for success, <0 for severe errors
/* Drain all frames currently buffered in the filtergraph sinks and feed them
 * to the corresponding encoders.  With flush set, an EOF from a video sink
 * triggers a final do_video_out(NULL) to flush the fps/duplication logic.
 * Returns 0 on success, <0 only for severe (allocation) errors. */
1285 static int reap_filters(int flush)
1287 AVFrame *filtered_frame = NULL;
1290 /* Reap all buffers present in the buffer sinks */
1291 for (i = 0; i < nb_output_streams; i++) {
1292 OutputStream *ost = output_streams[i];
1293 OutputFile *of = output_files[ost->file_index];
1294 AVFilterContext *filter;
1295 AVCodecContext *enc = ost->enc_ctx;
1300 filter = ost->filter->filter;
/* Lazily allocate the per-stream scratch frame used to receive sink output. */
1302 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1303 return AVERROR(ENOMEM);
1305 filtered_frame = ost->filtered_frame;
1308 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
/* NO_REQUEST: only take frames already buffered; do not pull new activity
 * through the graph. */
1309 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1310 AV_BUFFERSINK_FLAG_NO_REQUEST);
1312 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1313 av_log(NULL, AV_LOG_WARNING,
1314 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1315 } else if (flush && ret == AVERROR_EOF) {
1316 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1317 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1321 if (ost->finished) {
1322 av_frame_unref(filtered_frame);
1325 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1326 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1327 AVRational tb = enc->time_base;
/* Temporarily scale up the time base denominator (up to 16 extra bits) so
 * float_pts keeps sub-tick precision before dividing it back down. */
1328 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1330 tb.den <<= extra_bits;
1332 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1333 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1334 float_pts /= 1 << extra_bits;
1335 // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1336 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
/* Integer pts in encoder time base, offset so output starts at zero. */
1338 filtered_frame->pts =
1339 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1340 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1342 //if (ost->source_index >= 0)
1343 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1345 switch (filter->inputs[0]->type) {
1346 case AVMEDIA_TYPE_VIDEO:
/* Propagate the filter-produced SAR unless the user forced an aspect ratio. */
1347 if (!ost->frame_aspect_ratio.num)
1348 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1351 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1352 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1354 enc->time_base.num, enc->time_base.den);
1357 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1359 case AVMEDIA_TYPE_AUDIO:
/* Audio encoders that cannot handle mid-stream parameter changes must
 * receive a normalized (constant-layout) filter output. */
1360 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1361 enc->channels != av_frame_get_channels(filtered_frame)) {
1362 av_log(NULL, AV_LOG_ERROR,
1363 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1366 do_audio_out(of->ctx, ost, filtered_frame);
1369 // TODO support subtitle filters
1373 av_frame_unref(filtered_frame);
/* Print the end-of-run summary: per-type byte totals and muxing overhead at
 * INFO level, then verbose per-input/per-output stream packet statistics,
 * and a warning when nothing at all was encoded.
 * total_size is the muxed output size in bytes (or <=0 when unknown). */
1380 static void print_final_stats(int64_t total_size)
1382 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1383 uint64_t subtitle_size = 0;
1384 uint64_t data_size = 0;
1385 float percent = -1.0;
1389 for (i = 0; i < nb_output_streams; i++) {
1390 OutputStream *ost = output_streams[i];
1391 switch (ost->enc_ctx->codec_type) {
1392 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1393 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1394 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1395 default: other_size += ost->data_size; break;
1397 extra_size += ost->enc_ctx->extradata_size;
1398 data_size += ost->data_size;
/* Two-pass detection: use the AV_CODEC_FLAG_* names consistently (the old
 * CODEC_FLAG_PASS2 alias is deprecated and mixing the two is an error
 * waiting to happen once the compat macros are dropped). */
1399 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1400 != AV_CODEC_FLAG_PASS1)
/* Overhead percentage only when the totals are trustworthy. */
1404 if (data_size && total_size>0 && total_size >= data_size)
1405 percent = 100.0 * (total_size - data_size) / data_size;
1407 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1408 video_size / 1024.0,
1409 audio_size / 1024.0,
1410 subtitle_size / 1024.0,
1411 other_size / 1024.0,
1412 extra_size / 1024.0);
1414 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1416 av_log(NULL, AV_LOG_INFO, "unknown");
1417 av_log(NULL, AV_LOG_INFO, "\n");
1419 /* print verbose per-stream stats */
1420 for (i = 0; i < nb_input_files; i++) {
1421 InputFile *f = input_files[i];
1422 uint64_t total_packets = 0, total_size = 0;
1424 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1425 i, f->ctx->filename);
1427 for (j = 0; j < f->nb_streams; j++) {
1428 InputStream *ist = input_streams[f->ist_index + j];
1429 enum AVMediaType type = ist->dec_ctx->codec_type;
1431 total_size += ist->data_size;
1432 total_packets += ist->nb_packets;
1434 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1435 i, j, media_type_string(type));
1436 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1437 ist->nb_packets, ist->data_size);
1439 if (ist->decoding_needed) {
1440 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1441 ist->frames_decoded);
1442 if (type == AVMEDIA_TYPE_AUDIO)
1443 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1444 av_log(NULL, AV_LOG_VERBOSE, "; ");
1447 av_log(NULL, AV_LOG_VERBOSE, "\n");
1450 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1451 total_packets, total_size);
1454 for (i = 0; i < nb_output_files; i++) {
1455 OutputFile *of = output_files[i];
1456 uint64_t total_packets = 0, total_size = 0;
1458 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1459 i, of->ctx->filename);
1461 for (j = 0; j < of->ctx->nb_streams; j++) {
1462 OutputStream *ost = output_streams[of->ost_index + j];
1463 enum AVMediaType type = ost->enc_ctx->codec_type;
1465 total_size += ost->data_size;
1466 total_packets += ost->packets_written;
1468 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1469 i, j, media_type_string(type));
1470 if (ost->encoding_needed) {
1471 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1472 ost->frames_encoded);
1473 if (type == AVMEDIA_TYPE_AUDIO)
1474 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1475 av_log(NULL, AV_LOG_VERBOSE, "; ");
1478 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1479 ost->packets_written, ost->data_size);
1481 av_log(NULL, AV_LOG_VERBOSE, "\n");
1484 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1485 total_packets, total_size);
/* Nothing encoded at all: likely a trimming-option mistake; point at it. */
1487 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1488 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1490 av_log(NULL, AV_LOG_WARNING, "\n");
1492 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
/* Periodic (and final) progress reporting: builds one human-readable status
 * line in buf and, in parallel, a machine-readable key=value report in
 * buf_script for -progress consumers.  Rate-limited to one update per 500ms
 * except for the last report; prints final stats when is_last_report. */
1497 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1500 AVBPrint buf_script;
1502 AVFormatContext *oc;
1504 AVCodecContext *enc;
1505 int frame_number, vid, i;
1508 int64_t pts = INT64_MIN + 1;
/* static: persists across calls to implement the 500ms rate limit and the
 * cumulative QP histogram printed with -qphist. */
1509 static int64_t last_time = -1;
1510 static int qp_histogram[52];
1511 int hours, mins, secs, us;
1515 if (!print_stats && !is_last_report && !progress_avio)
1518 if (!is_last_report) {
1519 if (last_time == -1) {
1520 last_time = cur_time;
1523 if ((cur_time - last_time) < 500000)
1525 last_time = cur_time;
1528 t = (cur_time-timer_start) / 1000000.0;
1531 oc = output_files[0]->ctx;
1533 total_size = avio_size(oc->pb);
1534 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1535 total_size = avio_tell(oc->pb);
1539 av_bprint_init(&buf_script, 0, 1);
1540 for (i = 0; i < nb_output_streams; i++) {
1542 ost = output_streams[i];
1544 if (!ost->stream_copy)
1545 q = ost->quality / (float) FF_QP2LAMBDA;
/* For additional video streams after the first, append only the quantizer. */
1547 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1548 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1549 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1550 ost->file_index, ost->index, q);
1552 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1555 frame_number = ost->frame_number;
1556 fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" yields precision 1 below ~10 fps, 0 above — keeps width. */
1557 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1558 frame_number, fps < 9.95, fps, q);
1559 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1560 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1561 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1562 ost->file_index, ost->index, q);
1564 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1568 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* -qphist: one hex digit of log2(count+1) per QP bucket (first 32). */
1570 for (j = 0; j < 32; j++)
1571 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1574 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1576 double error, error_sum = 0;
1577 double scale, scale_sum = 0;
1579 char type[3] = { 'Y','U','V' };
1580 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1581 for (j = 0; j < 3; j++) {
/* Final report uses the encoder's cumulative error over all frames;
 * periodic reports use the per-frame error stored on the stream. */
1582 if (is_last_report) {
1583 error = enc->error[j];
1584 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1586 error = ost->error[j];
1587 scale = enc->width * enc->height * 255.0 * 255.0;
1593 p = psnr(error / scale);
1594 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
/* type[j] | 32 lower-cases the plane letter for the script key. */
1595 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1596 ost->file_index, ost->index, type[j] | 32, p);
1598 p = psnr(error_sum / scale_sum);
1599 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1600 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1601 ost->file_index, ost->index, p);
1605 /* compute min output value */
1606 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1607 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1608 ost->st->time_base, AV_TIME_BASE_Q));
1610 nb_frames_drop += ost->last_dropped;
1613 secs = FFABS(pts) / AV_TIME_BASE;
1614 us = FFABS(pts) % AV_TIME_BASE;
1620 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1621 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1623 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1625 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1626 "size=%8.0fkB time=", total_size / 1024.0);
1628 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1629 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1630 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1631 (100 * us) / AV_TIME_BASE);
1634 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1635 av_bprintf(&buf_script, "bitrate=N/A\n");
1637 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1638 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1641 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1642 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1643 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1644 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1645 hours, mins, secs, us);
1647 if (nb_frames_dup || nb_frames_drop)
1648 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1649 nb_frames_dup, nb_frames_drop);
1650 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1651 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1654 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1655 av_bprintf(&buf_script, "speed=N/A\n");
1657 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1658 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
/* '\r' keeps periodic reports on one terminal line; '\n' only at the end. */
1661 if (print_stats || is_last_report) {
1662 const char end = is_last_report ? '\n' : '\r';
1663 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1664 fprintf(stderr, "%s %c", buf, end);
1666 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1671 if (progress_avio) {
1672 av_bprintf(&buf_script, "progress=%s\n",
1673 is_last_report ? "end" : "continue");
/* size - 1: never write the bprint NUL terminator to the progress stream. */
1674 avio_write(progress_avio, buf_script.str,
1675 FFMIN(buf_script.len, buf_script.size - 1));
1676 avio_flush(progress_avio);
1677 av_bprint_finalize(&buf_script, NULL);
1678 if (is_last_report) {
1679 if ((ret = avio_closep(&progress_avio)) < 0)
1680 av_log(NULL, AV_LOG_ERROR,
1681 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1686 print_final_stats(total_size);
/* Drain every active encoder at end of stream: repeatedly call the encode
 * function with a NULL frame until it stops producing packets, writing each
 * flushed packet to its muxer (unless the muxer is already finished). */
1689 static void flush_encoders(void)
1693 for (i = 0; i < nb_output_streams; i++) {
1694 OutputStream *ost = output_streams[i];
1695 AVCodecContext *enc = ost->enc_ctx;
1696 AVFormatContext *os = output_files[ost->file_index]->ctx;
1697 int stop_encoding = 0;
1699 if (!ost->encoding_needed)
/* frame_size <= 1 audio encoders (e.g. PCM) buffer nothing; skip flush. */
1702 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1704 #if FF_API_LAVF_FMT_RAWPICTURE
/* Raw-picture passthrough never goes through the encoder; nothing to flush. */
1705 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
/* Dispatch on media type to the matching legacy encode entry point. */
1710 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1713 switch (enc->codec_type) {
1714 case AVMEDIA_TYPE_AUDIO:
1715 encode = avcodec_encode_audio2;
1718 case AVMEDIA_TYPE_VIDEO:
1719 encode = avcodec_encode_video2;
1730 av_init_packet(&pkt);
1734 update_benchmark(NULL);
/* NULL frame signals the encoder to emit any buffered/delayed packets. */
1735 ret = encode(enc, &pkt, NULL, &got_packet);
1736 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1738 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
/* Append two-pass statistics produced during the flush to the pass logfile. */
1743 if (ost->logfile && enc->stats_out) {
1744 fprintf(ost->logfile, "%s", enc->stats_out);
1750 if (ost->finished & MUXER_FINISHED) {
1751 av_packet_unref(&pkt);
1754 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1755 pkt_size = pkt.size;
1756 write_frame(os, &pkt, ost);
1757 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1758 do_video_stats(ost, pkt_size);
1769 * Check whether a packet from ist should be written into ost at this time
/* Decide whether a packet from this input stream should be written to this
 * output stream right now: the output must be mapped to exactly this input,
 * and the input's current pts must have reached the output file's -ss start. */
1771 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1773 OutputFile *of = output_files[ost->file_index];
/* Global index of ist across all input files, for comparison with the map. */
1774 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1776 if (ost->source_index != ist_index)
1782 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
/* Copy one input packet to an output stream without re-encoding (-c copy):
 * rescale timestamps into the output stream time base (offset by the output
 * start time), honor -copyts/-copypriorss trimming, run the optional parser
 * bitstream conversion, and hand the packet to write_frame(). */
1788 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1790 OutputFile *of = output_files[ost->file_index];
1791 InputFile *f = input_files [ist->file_index];
1792 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1793 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1797 av_init_packet(&opkt);
/* Drop leading non-keyframes unless the user asked to keep them. */
1799 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1800 !ost->copy_initial_nonkeyframes)
/* Drop packets before the (possibly -copyts adjusted) start point. */
1803 if (!ost->frame_number && !ost->copy_prior_start) {
1804 int64_t comp_start = start_time;
1805 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1806 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1807 if (pkt->pts == AV_NOPTS_VALUE ?
1808 ist->pts < comp_start :
1809 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
/* Stop this stream once the output file's -t recording window is exceeded. */
1813 if (of->recording_time != INT64_MAX &&
1814 ist->pts >= of->recording_time + start_time) {
1815 close_output_stream(ost);
/* Same for a per-input-file recording limit. */
1819 if (f->recording_time != INT64_MAX) {
1820 start_time = f->ctx->start_time;
1821 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1822 start_time += f->start_time;
1823 if (ist->pts >= f->recording_time + start_time) {
1824 close_output_stream(ost);
1829 /* force the input stream PTS */
1830 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1833 if (pkt->pts != AV_NOPTS_VALUE)
1834 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1836 opkt.pts = AV_NOPTS_VALUE;
/* Missing dts: synthesize one from the input stream's tracked dts. */
1838 if (pkt->dts == AV_NOPTS_VALUE)
1839 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1841 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1842 opkt.dts -= ost_tb_start_time;
/* Audio: rescale via sample count to avoid timestamp rounding drift. */
1844 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1845 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1847 duration = ist->dec_ctx->frame_size;
1848 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1849 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1850 ost->st->time_base) - ost_tb_start_time;
1853 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1854 opkt.flags = pkt->flags;
1855 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1856 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1857 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1858 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1859 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1861 int ret = av_parser_change(ost->parser, ost->st->codec,
1862 &opkt.data, &opkt.size,
1863 pkt->data, pkt->size,
1864 pkt->flags & AV_PKT_FLAG_KEY);
1866 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
/* Wrap parser-allocated data in a refcounted buffer so unref frees it. */
1871 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1876 opkt.data = pkt->data;
1877 opkt.size = pkt->size;
1879 av_copy_packet_side_data(&opkt, pkt);
1881 #if FF_API_LAVF_FMT_RAWPICTURE
1882 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1883 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1884 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1885 /* store AVPicture in AVPacket, as expected by the output format */
1886 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1888 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1892 opkt.data = (uint8_t *)&pict;
1893 opkt.size = sizeof(AVPicture);
1894 opkt.flags |= AV_PKT_FLAG_KEY;
1898 write_frame(of->ctx, &opkt, ost);
/* If the decoder reports no channel layout, pick the default layout for its
 * channel count (bounded by -guess_layout_max) and warn about the guess.
 * Returns nonzero on success; the failure return path is elided here. */
1901 int guess_input_channel_layout(InputStream *ist)
1903 AVCodecContext *dec = ist->dec_ctx;
1905 if (!dec->channel_layout) {
1906 char layout_name[256];
/* User capped how many channels we are allowed to guess a layout for. */
1908 if (dec->channels > ist->guess_layout_max)
1910 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1911 if (!dec->channel_layout)
1913 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1914 dec->channels, dec->channel_layout);
1915 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1916 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* Track decode success/failure statistics and, with -xerror, abort on decode
 * errors or on frames the decoder flagged as corrupt/concealed.
 * ist may be NULL (e.g. for subtitles), in which case the frame check is skipped. */
1921 static void check_decode_result(InputStream *ist, int *got_output, int ret)
/* decode_error_stat[0] counts successes, [1] counts failures. */
1923 if (*got_output || ret<0)
1924 decode_error_stat[ret<0] ++;
1926 if (ret < 0 && exit_on_error)
1929 if (exit_on_error && *got_output && ist) {
1930 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1931 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
/* Decode one audio packet, maintain the input stream's pts/dts tracking,
 * reconfigure filtergraphs if the audio parameters changed mid-stream, and
 * push the decoded frame into every filter this stream feeds.
 * Returns the decoder's return value, or a negative error from the push. */
1937 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1939 AVFrame *decoded_frame, *f;
1940 AVCodecContext *avctx = ist->dec_ctx;
1941 int i, ret, err = 0, resample_changed;
1942 AVRational decoded_frame_tb;
/* Lazily allocate the two persistent scratch frames on the stream. */
1944 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1945 return AVERROR(ENOMEM);
1946 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1947 return AVERROR(ENOMEM);
1948 decoded_frame = ist->decoded_frame;
1950 update_benchmark(NULL);
1951 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1952 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
/* A non-positive sample rate would divide by zero below; treat as bad data. */
1954 if (ret >= 0 && avctx->sample_rate <= 0) {
1955 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1956 ret = AVERROR_INVALIDDATA;
1959 check_decode_result(ist, got_output, ret);
1961 if (!*got_output || ret < 0)
1964 ist->samples_decoded += decoded_frame->nb_samples;
1965 ist->frames_decoded++;
1968 /* increment next_dts to use for the case where the input stream does not
1969 have timestamps or there are multiple frames in the packet */
1970 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1972 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
/* Detect any change in format/rate/channels/layout vs. what the filters
 * were configured for; if so the filtergraphs must be rebuilt. */
1976 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1977 ist->resample_channels != avctx->channels ||
1978 ist->resample_channel_layout != decoded_frame->channel_layout ||
1979 ist->resample_sample_rate != decoded_frame->sample_rate;
1980 if (resample_changed) {
1981 char layout1[64], layout2[64];
1983 if (!guess_input_channel_layout(ist)) {
1984 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1985 "layout for Input Stream #%d.%d\n", ist->file_index,
1989 decoded_frame->channel_layout = avctx->channel_layout;
1991 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1992 ist->resample_channel_layout);
1993 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1994 decoded_frame->channel_layout);
1996 av_log(NULL, AV_LOG_INFO,
1997 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1998 ist->file_index, ist->st->index,
1999 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2000 ist->resample_channels, layout1,
2001 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2002 avctx->channels, layout2);
/* Remember the new parameters, then reconfigure every graph fed by ist. */
2004 ist->resample_sample_fmt = decoded_frame->format;
2005 ist->resample_sample_rate = decoded_frame->sample_rate;
2006 ist->resample_channel_layout = decoded_frame->channel_layout;
2007 ist->resample_channels = avctx->channels;
2009 for (i = 0; i < nb_filtergraphs; i++)
2010 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2011 FilterGraph *fg = filtergraphs[i];
2012 if (configure_filtergraph(fg) < 0) {
2013 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2019 /* if the decoder provides a pts, use it instead of the last packet pts.
2020 the decoder could be delaying output by a packet or more. */
2021 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2022 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2023 decoded_frame_tb = avctx->time_base;
2024 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2025 decoded_frame->pts = decoded_frame->pkt_pts;
2026 decoded_frame_tb = ist->st->time_base;
2027 } else if (pkt->pts != AV_NOPTS_VALUE) {
2028 decoded_frame->pts = pkt->pts;
2029 decoded_frame_tb = ist->st->time_base;
2031 decoded_frame->pts = ist->dts;
2032 decoded_frame_tb = AV_TIME_BASE_Q;
/* Consume the packet pts so a second frame from this packet won't reuse it. */
2034 pkt->pts = AV_NOPTS_VALUE;
/* Rescale to a 1/sample_rate time base, tracking rounding remainders in
 * filter_in_rescale_delta_last to avoid cumulative drift. */
2035 if (decoded_frame->pts != AV_NOPTS_VALUE)
2036 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2037 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2038 (AVRational){1, avctx->sample_rate});
2039 ist->nb_samples = decoded_frame->nb_samples;
/* Feed each filter input; all but the last get a ref, the last may take
 * the frame directly (the ref/else branch lines are elided here). */
2040 for (i = 0; i < ist->nb_filters; i++) {
2041 if (i < ist->nb_filters - 1) {
2042 f = ist->filter_frame;
2043 err = av_frame_ref(f, decoded_frame);
2048 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049 AV_BUFFERSRC_FLAG_PUSH);
2050 if (err == AVERROR_EOF)
2051 err = 0; /* ignore */
2055 decoded_frame->pts = AV_NOPTS_VALUE;
2057 av_frame_unref(ist->filter_frame);
2058 av_frame_unref(decoded_frame);
2059 return err < 0 ? err : ret;
/* Decode one video packet, update the stream's pts tracking from the frame's
 * best-effort timestamp, reconfigure filtergraphs on size/format changes, and
 * push the decoded frame into every filter this stream feeds.
 * Returns the decoder's return value, or a negative error from the push. */
2062 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2064 AVFrame *decoded_frame, *f;
2065 int i, ret = 0, err = 0, resample_changed;
2066 int64_t best_effort_timestamp;
2067 AVRational *frame_sample_aspect;
2069 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070 return AVERROR(ENOMEM);
2071 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072 return AVERROR(ENOMEM);
2073 decoded_frame = ist->decoded_frame;
/* Feed our tracked dts back to the decoder via the packet. */
2074 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2076 update_benchmark(NULL);
2077 ret = avcodec_decode_video2(ist->dec_ctx,
2078 decoded_frame, got_output, pkt);
2079 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2081 // The following line may be required in some cases where there is no parser
2082 // or the parser does not has_b_frames correctly
2083 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2087 av_log(ist->dec_ctx, AV_LOG_WARNING,
2088 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089 "If you want to help, upload a sample "
2090 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092 ist->dec_ctx->has_b_frames,
2093 ist->st->codec->has_b_frames);
2096 check_decode_result(ist, got_output, ret);
/* Sanity check: the frame should match the context's negotiated parameters. */
2098 if (*got_output && ret >= 0) {
2099 if (ist->dec_ctx->width != decoded_frame->width ||
2100 ist->dec_ctx->height != decoded_frame->height ||
2101 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2102 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2103 decoded_frame->width,
2104 decoded_frame->height,
2105 decoded_frame->format,
2106 ist->dec_ctx->width,
2107 ist->dec_ctx->height,
2108 ist->dec_ctx->pix_fmt);
2112 if (!*got_output || ret < 0)
/* Honor an explicit -top field order override from the user. */
2115 if(ist->top_field_first>=0)
2116 decoded_frame->top_field_first = ist->top_field_first;
2118 ist->frames_decoded++;
/* Download the frame from a hwaccel surface into system memory if needed. */
2120 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2121 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2125 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2127 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2128 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2129 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2131 if (ts != AV_NOPTS_VALUE)
2132 ist->next_pts = ist->pts = ts;
2136 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138 ist->st->index, av_ts2str(decoded_frame->pts),
2139 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140 best_effort_timestamp,
2141 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142 decoded_frame->key_frame, decoded_frame->pict_type,
2143 ist->st->time_base.num, ist->st->time_base.den);
/* Container-level SAR overrides whatever the decoder produced. */
2148 if (ist->st->sample_aspect_ratio.num)
2149 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* Rebuild filtergraphs when the frame geometry or pixel format changed. */
2151 resample_changed = ist->resample_width != decoded_frame->width ||
2152 ist->resample_height != decoded_frame->height ||
2153 ist->resample_pix_fmt != decoded_frame->format;
2154 if (resample_changed) {
2155 av_log(NULL, AV_LOG_INFO,
2156 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157 ist->file_index, ist->st->index,
2158 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2159 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2161 ist->resample_width = decoded_frame->width;
2162 ist->resample_height = decoded_frame->height;
2163 ist->resample_pix_fmt = decoded_frame->format;
2165 for (i = 0; i < nb_filtergraphs; i++) {
2166 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167 configure_filtergraph(filtergraphs[i]) < 0) {
2168 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2174 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
/* Feed each filter input; all but the last get a ref of the frame
 * (the ref/else branch lines are elided here). */
2175 for (i = 0; i < ist->nb_filters; i++) {
2176 if (!frame_sample_aspect->num)
2177 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2179 if (i < ist->nb_filters - 1) {
2180 f = ist->filter_frame;
2181 err = av_frame_ref(f, decoded_frame);
2186 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187 if (ret == AVERROR_EOF) {
2188 ret = 0; /* ignore */
2189 } else if (ret < 0) {
2190 av_log(NULL, AV_LOG_FATAL,
2191 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2197 av_frame_unref(ist->filter_frame);
2198 av_frame_unref(decoded_frame);
2199 return err < 0 ? err : ret;
/* Decode one subtitle packet, optionally fix overlapping display durations
 * (-fix_sub_duration, which delays output by one subtitle), update sub2video,
 * and send the subtitle to every matching subtitle encoder. */
2202 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2204 AVSubtitle subtitle;
2205 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206 &subtitle, got_output, pkt);
/* NULL ist: skip the corrupt-frame check, it needs a decoded video frame. */
2208 check_decode_result(NULL, got_output, ret);
2210 if (ret < 0 || !*got_output) {
2212 sub2video_flush(ist);
2216 if (ist->fix_sub_duration) {
/* Clip the previous subtitle's duration so it ends when this one starts. */
2218 if (ist->prev_sub.got_output) {
2219 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2220 1000, AV_TIME_BASE);
2221 if (end < ist->prev_sub.subtitle.end_display_time) {
2222 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2223 "Subtitle duration reduced from %d to %d%s\n",
2224 ist->prev_sub.subtitle.end_display_time, end,
2225 end <= 0 ? ", dropping it" : "");
2226 ist->prev_sub.subtitle.end_display_time = end;
/* Swap current and previous: we emit the (now fixed) previous subtitle
 * this call and hold the current one until the next packet arrives. */
2229 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2230 FFSWAP(int, ret, ist->prev_sub.ret);
2231 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2239 sub2video_update(ist, &subtitle);
2241 if (!subtitle.num_rects)
2244 ist->frames_decoded++;
2246 for (i = 0; i < nb_output_streams; i++) {
2247 OutputStream *ost = output_streams[i];
2249 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2250 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2253 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2257 avsubtitle_free(&subtitle);
/* Signal EOF to every filtergraph input fed by this input stream by pushing
 * a NULL frame into each buffer source. */
2261 static int send_filter_eof(InputStream *ist)
2264 for (i = 0; i < ist->nb_filters; i++) {
2265 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2272 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2273 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
/*
 * Feed one demuxed packet from 'ist' through decoding (audio/video/subtitle)
 * or, for stream copy, only advance the timestamp bookkeeping and forward the
 * packet to matching outputs.  pkt == NULL means "flush": drain the decoder at
 * EOF.  no_eof suppresses the EOF sent to attached filters (used when the
 * input is looped so the stream will be fed again).
 * NOTE(review): this listing is an excerpt - some original lines (local
 * declarations, braces, returns) are not visible here.
 */
2279 if (!ist->saw_first_ts) {
/* First packet ever seen on this stream: seed ist->dts.  Video starts
 * has_b_frames frame durations early so reordered frames line up. */
2280 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2282 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2283 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2284 ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2286 ist->saw_first_ts = 1;
2289 if (ist->next_dts == AV_NOPTS_VALUE)
2290 ist->next_dts = ist->dts;
2291 if (ist->next_pts == AV_NOPTS_VALUE)
2292 ist->next_pts = ist->pts;
2296 av_init_packet(&avpkt);
/* A packet DTS (rescaled to AV_TIME_BASE) overrides the predicted one. */
2304 if (pkt->dts != AV_NOPTS_VALUE) {
2305 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2306 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2307 ist->next_pts = ist->pts = ist->dts;
2310 // while we have more to decode or while the decoder did output something on EOF
2311 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2315 ist->pts = ist->next_pts;
2316 ist->dts = ist->next_dts;
/* Decoders without SUBFRAMES support consume the whole packet at once;
 * a partial consume means several frames were packed together. */
2318 if (avpkt.size && avpkt.size != pkt->size &&
2319 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2320 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2321 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2322 ist->showed_multi_packet_warning = 1;
2325 switch (ist->dec_ctx->codec_type) {
2326 case AVMEDIA_TYPE_AUDIO:
2327 ret = decode_audio    (ist, &avpkt, &got_output);
2329 case AVMEDIA_TYPE_VIDEO:
2330 ret = decode_video    (ist, &avpkt, &got_output);
/* Estimate the frame duration: packet duration if present, else from
 * the codec frame rate and the parser's repeat_pict. */
2331 if (avpkt.duration) {
2332 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2333 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2334 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2335 duration = ((int64_t)AV_TIME_BASE *
2336 ist->dec_ctx->framerate.den * ticks) /
2337 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2341 if(ist->dts != AV_NOPTS_VALUE && duration) {
2342 ist->next_dts += duration;
2344 ist->next_dts = AV_NOPTS_VALUE;
2347 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2349 case AVMEDIA_TYPE_SUBTITLE:
2350 ret = transcode_subtitles(ist, &avpkt, &got_output);
2357 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2358 ist->file_index, ist->st->index, av_err2str(ret));
/* Only the first slice of a multi-frame packet keeps the pts. */
2365 avpkt.pts= AV_NOPTS_VALUE;
2367 // touch data and size only if not EOF
2369 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2377 if (got_output && !pkt)
2381 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2382 /* except when looping we need to flush but not to send an EOF */
2383 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2384 int ret = send_filter_eof(ist);
2386 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2391 /* handle stream copy */
2392 if (!ist->decoding_needed) {
/* No decoder: predict the next DTS from frame size (audio) or frame
 * rate / packet duration (video). */
2393 ist->dts = ist->next_dts;
2394 switch (ist->dec_ctx->codec_type) {
2395 case AVMEDIA_TYPE_AUDIO:
2396 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2397 ist->dec_ctx->sample_rate;
2399 case AVMEDIA_TYPE_VIDEO:
2400 if (ist->framerate.num) {
2401 // TODO: Remove work-around for c99-to-c89 issue 7
2402 AVRational time_base_q = AV_TIME_BASE_Q;
/* Snap to the forced -r grid: convert to frame index, step one frame. */
2403 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2404 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2405 } else if (pkt->duration) {
2406 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2407 } else if(ist->dec_ctx->framerate.num != 0) {
2408 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2409 ist->next_dts += ((int64_t)AV_TIME_BASE *
2410 ist->dec_ctx->framerate.den * ticks) /
2411 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2415 ist->pts = ist->dts;
2416 ist->next_pts = ist->next_dts;
/* Forward the packet to every output stream that stream-copies from us. */
2418 for (i = 0; pkt && i < nb_output_streams; i++) {
2419 OutputStream *ost = output_streams[i];
2421 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2424 do_streamcopy(ist, ost, pkt);
2430 static void print_sdp(void)
/* Collect every RTP output context, build a joint SDP description and either
 * print it to stdout or write it to the file given with -sdp_file. */
2435 AVIOContext *sdp_pb;
2436 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
/* Only outputs using the "rtp" muxer take part in the SDP. */
2440 for (i = 0, j = 0; i < nb_output_files; i++) {
2441 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2442 avc[j] = output_files[i]->ctx;
2450 av_sdp_create(avc, j, sdp, sizeof(sdp));
2452 if (!sdp_filename) {
2453 printf("SDP:\n%s\n", sdp);
2456 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2457 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2459 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2460 avio_closep(&sdp_pb);
/* Write the SDP only once: drop the filename after use. */
2461 av_freep(&sdp_filename);
2469 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2472 for (i = 0; hwaccels[i].name; i++)
2473 if (hwaccels[i].pix_fmt == pix_fmt)
2474 return &hwaccels[i];
2478 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
/* AVCodecContext.get_format callback: walk the decoder's candidate list and
 * select the first hwaccel pixel format whose hwaccel can be initialized,
 * honouring the user's -hwaccel choice; non-hwaccel formats fall through. */
2480 InputStream *ist = s->opaque;
2481 const enum AVPixelFormat *p;
/* The candidate list is terminated by -1 (AV_PIX_FMT_NONE). */
2484 for (p = pix_fmts; *p != -1; p++) {
2485 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2486 const HWAccel *hwaccel;
2488 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2491 hwaccel = get_hwaccel(*p);
/* Skip formats whose hwaccel does not match the active or requested one. */
2493 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2494 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2497 ret = hwaccel->init(s);
/* Initialization failure is fatal only when this hwaccel was explicitly
 * requested; under HWACCEL_AUTO we simply try the next candidate. */
2499 if (ist->hwaccel_id == hwaccel->id) {
2500 av_log(NULL, AV_LOG_FATAL,
2501 "%s hwaccel requested for input stream #%d:%d, "
2502 "but cannot be initialized.\n", hwaccel->name,
2503 ist->file_index, ist->st->index);
2504 return AV_PIX_FMT_NONE;
2508 ist->active_hwaccel_id = hwaccel->id;
2509 ist->hwaccel_pix_fmt  = *p;
2516 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2518 InputStream *ist = s->opaque;
2520 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2521 return ist->hwaccel_get_buffer(s, frame, flags);
2523 return avcodec_default_get_buffer2(s, frame, flags);
2526 static int init_input_stream(int ist_index, char *error, int error_len)
/* Open the decoder of one input stream (when decoding is needed) and reset
 * its timestamp predictors.  On failure a human-readable message is written
 * into 'error' and a negative AVERROR code is returned. */
2529 InputStream *ist = input_streams[ist_index];
2531 if (ist->decoding_needed) {
2532 AVCodec *codec = ist->dec;
/* No decoder was found earlier for this codec id. */
2534 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2535 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2536 return AVERROR(EINVAL);
/* Install our hwaccel-aware callbacks on the decoder context. */
2539 ist->dec_ctx->opaque                = ist;
2540 ist->dec_ctx->get_format            = get_format;
2541 ist->dec_ctx->get_buffer2           = get_buffer;
2542 ist->dec_ctx->thread_safe_callbacks = 1;
2544 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2545 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2546 (ist->decoding_needed & DECODING_FOR_OST)) {
2547 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2548 if (ist->decoding_needed & DECODING_FOR_FILTER)
2549 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2552 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
/* Default to automatic thread count unless the user picked one. */
2554 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2555 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2556 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2557 if (ret == AVERROR_EXPERIMENTAL)
2558 abort_codec_experimental(codec, 0);
2560 snprintf(error, error_len,
2561 "Error while opening decoder for input stream "
2563 ist->file_index, ist->st->index, av_err2str(ret));
/* Any option left in the dict was not consumed -> fatal. */
2566 assert_avoptions(ist->decoder_opts);
/* Unknown until the first packet arrives. */
2569 ist->next_pts = AV_NOPTS_VALUE;
2570 ist->next_dts = AV_NOPTS_VALUE;
2575 static InputStream *get_input_stream(OutputStream *ost)
2577 if (ost->source_index >= 0)
2578 return input_streams[ost->source_index];
/*
 * qsort() comparator for int64_t values: returns -1, 0 or 1 for
 * less-than, equal and greater-than respectively.  Written as a
 * branch-free three-way compare (the FFDIFFSIGN idiom) so that the
 * difference can never overflow.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
2587 static int init_output_stream(OutputStream *ost, char *error, int error_len)
/* Open the encoder of one output stream (when encoding is needed), copy the
 * encoder's global side data and time base to the muxer stream, and fill
 * 'error' with a message on failure.  Stream-copy outputs only get their
 * options applied and the time base propagated. */
2591 if (ost->encoding_needed) {
2592 AVCodec      *codec = ost->enc;
2593 AVCodecContext *dec = NULL;
2596 if ((ist = get_input_stream(ost)))
/* Subtitle encoders need the decoder's ASS header. */
2598 if (dec && dec->subtitle_header) {
2599 /* ASS code assumes this buffer is null terminated so add extra byte. */
2600 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2601 if (!ost->enc_ctx->subtitle_header)
2602 return AVERROR(ENOMEM);
2603 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2604 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2606 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2607 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
/* Default audio bitrate (128 kb/s) unless the user gave b/ab. */
2608 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2610 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2611 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2612 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
/* Hand the filtergraph's hw frames context over to the encoder. */
2614 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2615 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2616 if (!ost->enc_ctx->hw_frames_ctx)
2617 return AVERROR(ENOMEM);
2620 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2621 if (ret == AVERROR_EXPERIMENTAL)
2622 abort_codec_experimental(codec, 1);
2623 snprintf(error, error_len,
2624 "Error while opening encoder for output stream #%d:%d - "
2625 "maybe incorrect parameters such as bit_rate, rate, width or height",
2626 ost->file_index, ost->index);
/* Fixed-frame-size audio encoders dictate the buffersink frame size. */
2629 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2630 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2631 av_buffersink_set_frame_size(ost->filter->filter,
2632 ost->enc_ctx->frame_size);
2633 assert_avoptions(ost->encoder_opts);
2634 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2635 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2636 " It takes bits/s as argument, not kbits/s\n");
/* Mirror the opened encoder context into the (deprecated) st->codec. */
2638 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2640 av_log(NULL, AV_LOG_FATAL,
2641 "Error initializing the output stream codec context.\n");
/* Copy encoder-generated global side data to the muxer stream. */
2645 if (ost->enc_ctx->nb_coded_side_data) {
2648 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2649 sizeof(*ost->st->side_data));
2650 if (!ost->st->side_data)
2651 return AVERROR(ENOMEM);
2653 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2654 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2655 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2657 sd_dst->data = av_malloc(sd_src->size);
2659 return AVERROR(ENOMEM);
2660 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2661 sd_dst->size = sd_src->size;
2662 sd_dst->type = sd_src->type;
2663 ost->st->nb_side_data++;
2667 // copy timebase while removing common factors
2668 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2669 ost->st->codec->codec= ost->enc_ctx->codec;
/* Stream copy path: only apply user codec options and take the time base. */
2671 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2673 av_log(NULL, AV_LOG_FATAL,
2674 "Error setting up codec context options.\n");
2677 // copy timebase while removing common factors
2678 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2684 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2685 AVCodecContext *avctx)
/* Parse the -force_key_frames time list 'kf' (comma-separated).  The token
 * "chapters[+offset]" expands to the start time of every chapter of the
 * output file.  The resulting timestamps are rescaled to avctx->time_base,
 * sorted ascending and stored in ost->forced_kf_pts/forced_kf_count. */
2688 int n = 1, i, size, index = 0;
/* One entry per comma-separated token. */
2691 for (p = kf; *p; p++)
2695 pts = av_malloc_array(size, sizeof(*pts));
2697 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2702 for (i = 0; i < n; i++) {
2703 char *next = strchr(p, ',');
2708 if (!memcmp(p, "chapters", 8)) {
2710 AVFormatContext *avf = output_files[ost->file_index]->ctx;
/* Grow by one slot per chapter (one slot was already counted for the
 * "chapters" token itself); guard the int overflow of size. */
2713 if (avf->nb_chapters > INT_MAX - size ||
2714 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2716 av_log(NULL, AV_LOG_FATAL,
2717 "Could not allocate forced key frames array.\n");
/* Optional offset appended to the keyword, e.g. "chapters-0.1". */
2720 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2721 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2723 for (j = 0; j < avf->nb_chapters; j++) {
2724 AVChapter *c = avf->chapters[j];
2725 av_assert1(index < size);
2726 pts[index++] = av_rescale_q(c->start, c->time_base,
2727 avctx->time_base) + t;
/* Plain timestamp token. */
2732 t = parse_time_or_die("force_key_frames", p, 1);
2733 av_assert1(index < size);
2734 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2741 av_assert0(index == size);
2742 qsort(pts, size, sizeof(*pts), compare_int64);
2743 ost->forced_kf_count = size;
2744 ost->forced_kf_pts   = pts;
2747 static void report_new_stream(int input_index, AVPacket *pkt)
/* Warn, once per stream, when a packet arrives for an input stream that
 * appeared only after the initial probing of the file (it will be ignored
 * by the established stream mapping). */
2749 InputFile *file = input_files[input_index];
2750 AVStream *st = file->ctx->streams[pkt->stream_index];
/* Streams below nb_streams_warn have already been reported. */
2752 if (pkt->stream_index < file->nb_streams_warn)
2754 av_log(file->ctx, AV_LOG_WARNING,
2755 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2756 av_get_media_type_string(st->codec->codec_type),
2757 input_index, pkt->stream_index,
2758 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
/* Remember the highest index seen so the warning fires only once. */
2759 file->nb_streams_warn = pkt->stream_index + 1;
2762 static void set_encoder_id(OutputFile *of, OutputStream *ost)
/* Set the per-stream "encoder" metadata tag to "<Lavc ident> <codec name>".
 * A user-supplied tag is left untouched, and under bitexact mode the plain
 * "Lavc" prefix is used so output files stay reproducible. */
2764 AVDictionaryEntry *e;
2766 uint8_t *encoder_string;
2767 int encoder_string_len;
2768 int format_flags = 0;
2769 int codec_flags = 0;
/* A tag already present in the stream metadata wins. */
2771 if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
/* Evaluate the not-yet-applied fflags/flags option strings to find out
 * whether bitexact was requested on the muxer or the encoder. */
2774 e = av_dict_get(of->opts, "fflags", NULL, 0);
2776 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2779 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2781 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2783 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2786 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2789 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2790 encoder_string     = av_mallocz(encoder_string_len);
2791 if (!encoder_string)
2794 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2795 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2797 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2798 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
/* DONT_STRDUP_VAL hands ownership of encoder_string to the dictionary. */
2799 av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
2800 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2803 static int transcode_init(void)
/* One-time setup before the main transcoding loop: map complex-filtergraph
 * outputs to their source streams, derive encoder/stream-copy parameters for
 * every output stream, open all decoders and encoders, write the output file
 * headers, and print the stream mapping. Returns 0 or a negative AVERROR. */
2805 int ret = 0, i, j, k;
2806 AVFormatContext *oc;
2809 char error[1024] = {0};
/* Give complex-filtergraph outputs a source_index when the graph has a
 * single input, so downstream code can treat them like simple mappings. */
2812 for (i = 0; i < nb_filtergraphs; i++) {
2813 FilterGraph *fg = filtergraphs[i];
2814 for (j = 0; j < fg->nb_outputs; j++) {
2815 OutputFilter *ofilter = fg->outputs[j];
2816 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2818 if (fg->nb_inputs != 1)
2820 for (k = nb_input_streams-1; k >= 0 ; k--)
2821 if (fg->inputs[0]->ist == input_streams[k])
2823 ofilter->ost->source_index = k;
2827 /* init framerate emulation */
2828 for (i = 0; i < nb_input_files; i++) {
2829 InputFile *ifile = input_files[i];
2830 if (ifile->rate_emu)
2831 for (j = 0; j < ifile->nb_streams; j++)
2832 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2835 /* for each output stream, we compute the right encoding parameters */
2836 for (i = 0; i < nb_output_streams; i++) {
2837 AVCodecContext *enc_ctx;
2838 AVCodecContext *dec_ctx = NULL;
2839 ost = output_streams[i];
2840 oc  = output_files[ost->file_index]->ctx;
2841 ist = get_input_stream(ost);
2843 if (ost->attachment_filename)
/* Stream copy configures the muxer-side context directly. */
2846 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2849 dec_ctx = ist->dec_ctx;
2851 ost->st->disposition          = ist->st->disposition;
2852 enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
2853 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
/* If this is the only stream of its media type in the output file,
 * mark it as the default one. */
2855 for (j=0; j<oc->nb_streams; j++) {
2856 AVStream *st = oc->streams[j];
2857 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2860 if (j == oc->nb_streams)
2861 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2862 ost->st->disposition = AV_DISPOSITION_DEFAULT;
/* ---- stream copy: clone codec parameters from the decoder ---- */
2865 if (ost->stream_copy) {
2867 uint64_t extra_size;
2869 av_assert0(ist && !ost->filter);
2871 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2873 if (extra_size > INT_MAX) {
2874 return AVERROR(EINVAL);
2877 /* if stream_copy is selected, no need to decode or encode */
2878 enc_ctx->codec_id   = dec_ctx->codec_id;
2879 enc_ctx->codec_type = dec_ctx->codec_type;
/* Keep the input codec tag only when the output container can map it. */
2881 if (!enc_ctx->codec_tag) {
2882 unsigned int codec_tag;
2883 if (!oc->oformat->codec_tag ||
2884 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2885 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2886 enc_ctx->codec_tag = dec_ctx->codec_tag;
2889 enc_ctx->bit_rate       = dec_ctx->bit_rate;
2890 enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
2891 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2892 enc_ctx->field_order    = dec_ctx->field_order;
2893 if (dec_ctx->extradata_size) {
2894 enc_ctx->extradata      = av_mallocz(extra_size);
2895 if (!enc_ctx->extradata) {
2896 return AVERROR(ENOMEM);
2898 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2900 enc_ctx->extradata_size= dec_ctx->extradata_size;
2901 enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
2903 enc_ctx->time_base = ist->st->time_base;
/* Container-specific time base fixups follow (copy_tb < 0 = auto). */
2905 * Avi is a special case here because it supports variable fps but
2906 * having the fps and timebase differe significantly adds quite some
2909 if(!strcmp(oc->oformat->name, "avi")) {
2910 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2911 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2912 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2913 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2915 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2916 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2917 enc_ctx->ticks_per_frame = 2;
2918 } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2919 && av_q2d(ist->st->time_base) < 1.0/500
2921 enc_ctx->time_base = dec_ctx->time_base;
2922 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2923 enc_ctx->time_base.den *= 2;
2924 enc_ctx->ticks_per_frame = 2;
2926 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2927 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2928 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2929 && strcmp(oc->oformat->name, "f4v")
2931 if( copy_tb<0 && dec_ctx->time_base.den
2932 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2933 && av_q2d(ist->st->time_base) < 1.0/500
2935 enc_ctx->time_base = dec_ctx->time_base;
2936 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
/* Timecode (tmcd) tracks keep the codec time base when plausible. */
2939 if (   enc_ctx->codec_tag == AV_RL32("tmcd")
2940 && dec_ctx->time_base.num < dec_ctx->time_base.den
2941 && dec_ctx->time_base.num > 0
2942 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2943 enc_ctx->time_base = dec_ctx->time_base;
2946 if (!ost->frame_rate.num)
2947 ost->frame_rate = ist->framerate;
2948 if(ost->frame_rate.num)
2949 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2951 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2952 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
/* Copy stream side data, skipping the display matrix when the user
 * overrode the rotation. */
2954 if (ist->st->nb_side_data) {
2955 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2956 sizeof(*ist->st->side_data));
2957 if (!ost->st->side_data)
2958 return AVERROR(ENOMEM);
2960 ost->st->nb_side_data = 0;
2961 for (j = 0; j < ist->st->nb_side_data; j++) {
2962 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2963 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2965 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2968 sd_dst->data = av_malloc(sd_src->size);
2970 return AVERROR(ENOMEM);
2971 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2972 sd_dst->size = sd_src->size;
2973 sd_dst->type = sd_src->type;
2974 ost->st->nb_side_data++;
2978 ost->parser = av_parser_init(enc_ctx->codec_id);
/* Media-type specific copies for stream copy. */
2980 switch (enc_ctx->codec_type) {
2981 case AVMEDIA_TYPE_AUDIO:
2982 if (audio_volume != 256) {
2983 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2986 enc_ctx->channel_layout     = dec_ctx->channel_layout;
2987 enc_ctx->sample_rate        = dec_ctx->sample_rate;
2988 enc_ctx->channels           = dec_ctx->channels;
2989 enc_ctx->frame_size         = dec_ctx->frame_size;
2990 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2991 enc_ctx->block_align        = dec_ctx->block_align;
2992 enc_ctx->initial_padding    = dec_ctx->delay;
2993 enc_ctx->profile            = dec_ctx->profile;
2994 #if FF_API_AUDIOENC_DELAY
2995 enc_ctx->delay              = dec_ctx->delay;
/* CBR MP3 and AC3 muxing must not carry a block_align. */
2997 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2998 enc_ctx->block_align= 0;
2999 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3000 enc_ctx->block_align= 0;
3002 case AVMEDIA_TYPE_VIDEO:
3003 enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
3004 enc_ctx->width              = dec_ctx->width;
3005 enc_ctx->height             = dec_ctx->height;
3006 enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
3007 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3009 av_mul_q(ost->frame_aspect_ratio,
3010 (AVRational){ enc_ctx->height, enc_ctx->width });
3011 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3012 "with stream copy may produce invalid files\n");
3014 else if (ist->st->sample_aspect_ratio.num)
3015 sar = ist->st->sample_aspect_ratio;
3017 sar = dec_ctx->sample_aspect_ratio;
3018 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3019 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3020 ost->st->r_frame_rate = ist->st->r_frame_rate;
3022 case AVMEDIA_TYPE_SUBTITLE:
3023 enc_ctx->width  = dec_ctx->width;
3024 enc_ctx->height = dec_ctx->height;
3026 case AVMEDIA_TYPE_UNKNOWN:
3027 case AVMEDIA_TYPE_DATA:
3028 case AVMEDIA_TYPE_ATTACHMENT:
/* ---- encoding path ---- */
3035 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3037 /* should only happen when a default codec is not present. */
3038 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3039 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3040 ret = AVERROR(EINVAL);
3044 set_encoder_id(output_files[ost->file_index], ost);
3047 if (qsv_transcode_init(ost))
/* A/V encoders without an explicit filter get a trivial one. */
3052 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3053 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3055 fg = init_simple_filtergraph(ist, ost);
3056 if (configure_filtergraph(fg)) {
3057 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Pick an output frame rate: buffersink, -r, input r_frame_rate, then
 * a 25 fps fallback, clamped to the encoder's supported list. */
3062 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3063 if (!ost->frame_rate.num)
3064 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3065 if (ist && !ost->frame_rate.num)
3066 ost->frame_rate = ist->framerate;
3067 if (ist && !ost->frame_rate.num)
3068 ost->frame_rate = ist->st->r_frame_rate;
3069 if (ist && !ost->frame_rate.num) {
3070 ost->frame_rate = (AVRational){25, 1};
3071 av_log(NULL, AV_LOG_WARNING,
3073 "about the input framerate is available. Falling "
3074 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3075 "if you want a different framerate.\n",
3076 ost->file_index, ost->index);
3078 //      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3079 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3080 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3081 ost->frame_rate = ost->enc->supported_framerates[idx];
3083 // reduce frame rate for mpeg4 to be within the spec limits
3084 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3085 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3086 ost->frame_rate.num, ost->frame_rate.den, 65535);
/* Pull the negotiated formats out of the filtergraph's output link. */
3090 switch (enc_ctx->codec_type) {
3091 case AVMEDIA_TYPE_AUDIO:
3092 enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
3093 enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
3094 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3095 enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3096 enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
3098 case AVMEDIA_TYPE_VIDEO:
3099 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3100 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3101 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3102 if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3103 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3104 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3105 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
/* Forced keyframe times were parsed in AV_TIME_BASE_Q earlier. */
3107 for (j = 0; j < ost->forced_kf_count; j++)
3108 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3110 enc_ctx->time_base);
3112 enc_ctx->width  = ost->filter->filter->inputs[0]->w;
3113 enc_ctx->height = ost->filter->filter->inputs[0]->h;
3114 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3115 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3116 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3117 ost->filter->filter->inputs[0]->sample_aspect_ratio;
3118 if (!strncmp(ost->enc->name, "libx264", 7) &&
3119 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3120 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3121 av_log(NULL, AV_LOG_WARNING,
3122 "No pixel format specified, %s for H.264 encoding chosen.\n"
3123 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3124 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3125 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3126 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3127 ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3128 av_log(NULL, AV_LOG_WARNING,
3129 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3130 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3131 av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3132 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3134 ost->st->avg_frame_rate = ost->frame_rate;
/* Keep bits_per_raw_sample only while the picture is unmodified. */
3137 enc_ctx->width   != dec_ctx->width  ||
3138 enc_ctx->height  != dec_ctx->height ||
3139 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3140 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3143 if (ost->forced_keyframes) {
3144 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3145 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3146 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3148 av_log(NULL, AV_LOG_ERROR,
3149 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3152 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3153 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3154 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3155 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3157 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3158 // parse it only for static kf timings
3159 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3160 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3164 case AVMEDIA_TYPE_SUBTITLE:
3165 enc_ctx->time_base = (AVRational){1, 1000};
3166 if (!enc_ctx->width) {
3167 enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
3168 enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
3171 case AVMEDIA_TYPE_DATA:
/* Evaluate the -disposition flag string against a local option table. */
3179 if (ost->disposition) {
3180 static const AVOption opts[] = {
3181 { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3182 { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
3183 { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
3184 { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
3185 { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
3186 { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
3187 { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
3188 { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
3189 { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
3190 { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
3191 { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
3192 { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
3193 { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
3194 { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
3197 static const AVClass class = {
3199 .item_name  = av_default_item_name,
3201 .version    = LIBAVUTIL_VERSION_INT,
3203 const AVClass *pclass = &class;
3205 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3211 /* init input streams */
3212 for (i = 0; i < nb_input_streams; i++)
3213 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
/* Roll back any encoders that were already set up. */
3214 for (i = 0; i < nb_output_streams; i++) {
3215 ost = output_streams[i];
3216 avcodec_close(ost->enc_ctx);
3221 /* open each encoder */
3222 for (i = 0; i < nb_output_streams; i++) {
3223 ret = init_output_stream(output_streams[i], error, sizeof(error));
3228 /* discard unused programs */
3229 for (i = 0; i < nb_input_files; i++) {
3230 InputFile *ifile = input_files[i];
3231 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3232 AVProgram *p = ifile->ctx->programs[j];
3233 int discard  = AVDISCARD_ALL;
3235 for (k = 0; k < p->nb_stream_indexes; k++)
3236 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3237 discard = AVDISCARD_DEFAULT;
3240 p->discard = discard;
3244 /* open files and write file headers */
3245 for (i = 0; i < nb_output_files; i++) {
3246 oc = output_files[i]->ctx;
3247 oc->interrupt_callback = int_cb;
3248 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3249 snprintf(error, sizeof(error),
3250 "Could not write header for output file #%d "
3251 "(incorrect codec parameters ?): %s",
3252 i, av_err2str(ret));
3253 ret = AVERROR(EINVAL);
3256 //         assert_avoptions(output_files[i]->opts);
3257 if (strcmp(oc->oformat->name, "rtp")) {
3263 /* dump the file output parameters - cannot be done before in case
3265 for (i = 0; i < nb_output_files; i++) {
3266 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3269 /* dump the stream mapping */
3270 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3271 for (i = 0; i < nb_input_streams; i++) {
3272 ist = input_streams[i];
3274 for (j = 0; j < ist->nb_filters; j++) {
3275 if (ist->filters[j]->graph->graph_desc) {
3276 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3277 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3278 ist->filters[j]->name);
3279 if (nb_filtergraphs > 1)
3280 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3281 av_log(NULL, AV_LOG_INFO, "\n");
3286 for (i = 0; i < nb_output_streams; i++) {
3287 ost = output_streams[i];
3289 if (ost->attachment_filename) {
3290 /* an attached file */
3291 av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3292 ost->attachment_filename, ost->file_index, ost->index);
3296 if (ost->filter && ost->filter->graph->graph_desc) {
3297 /* output from a complex graph */
3298 av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3299 if (nb_filtergraphs > 1)
3300 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3302 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3303 ost->index, ost->enc ? ost->enc->name : "?");
3307 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3308 input_streams[ost->source_index]->file_index,
3309 input_streams[ost->source_index]->st->index,
3312 if (ost->sync_ist != input_streams[ost->source_index])
3313 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3314 ost->sync_ist->file_index,
3315 ost->sync_ist->st->index);
3316 if (ost->stream_copy)
3317 av_log(NULL, AV_LOG_INFO, " (copy)");
/* Show "native" when the codec's own name matches the descriptor. */
3319 const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3320 const AVCodec *out_codec   = ost->enc;
3321 const char *decoder_name   = "?";
3322 const char *in_codec_name  = "?";
3323 const char *encoder_name   = "?";
3324 const char *out_codec_name = "?";
3325 const AVCodecDescriptor *desc;
3328 decoder_name  = in_codec->name;
3329 desc = avcodec_descriptor_get(in_codec->id);
3331 in_codec_name = desc->name;
3332 if (!strcmp(decoder_name, in_codec_name))
3333 decoder_name  = "native";
3337 encoder_name   = out_codec->name;
3338 desc = avcodec_descriptor_get(out_codec->id);
3340 out_codec_name = desc->name;
3341 if (!strcmp(encoder_name, out_codec_name))
3342 encoder_name   = "native";
3345 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3346 in_codec_name, decoder_name,
3347 out_codec_name, encoder_name);
3349 av_log(NULL, AV_LOG_INFO, "\n");
3353 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3357 if (sdp_filename || want_sdp) {
3361 transcode_init_done = 1;
3366 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3367 static int need_output(void)
/* Walk every output stream; a stream stops needing output once it is
 * finished, once its file hit the -fs size limit, or once it produced
 * -frames/max_frames frames (in which case the whole file is closed). */
3371     for (i = 0; i < nb_output_streams; i++) {
3372         OutputStream *ost = output_streams[i];
3373         OutputFile *of = output_files[ost->file_index];
3374         AVFormatContext *os = output_files[ost->file_index]->ctx;
/* avio_tell() gives bytes written so far; compare against -fs limit.
 * os->pb may be NULL for muxers without an I/O context. */
3376         if (ost->finished ||
3377             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3379         if (ost->frame_number >= ost->max_frames) {
/* frame limit reached: close every stream of this output file, not just
 * this one, so the muxer can finalize. */
3381             for (j = 0; j < of->ctx->nb_streams; j++)
3382                 close_output_stream(output_streams[of->ost_index + j]);
3393  * Select the output stream to process.
 *
 * Picks the unfinished output stream with the smallest dts (rescaled to a
 * common time base) so muxing stays interleaved across outputs.
 *
3395  * @return selected output stream, or NULL if none available
3397 static OutputStream *choose_output(void)
3400     int64_t opts_min = INT64_MAX;
3401     OutputStream *ost_min = NULL;
3403     for (i = 0; i < nb_output_streams; i++) {
3404         OutputStream *ost = output_streams[i];
/* Streams with no dts yet sort first (INT64_MIN) so they get fed data. */
3405         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3406                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3408         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3409             av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
/* Track the minimum; a temporarily unavailable stream yields NULL so the
 * caller knows to wait rather than pick a worse candidate. */
3411         if (!ost->finished && opts < opts_min) {
3413             ost_min = ost->unavailable ? NULL : ost;
/* Enable (on != 0) or disable terminal echo on stdin via termios.
 * Used so interactive command entry ('c'/'C'/'d' keys) shows typed text.
 * Failures of tcgetattr (e.g. stdin not a tty) are silently ignored. */
3419 static void set_tty_echo(int on)
3423     if (tcgetattr(0, &tty) == 0) {
3424         if (on) tty.c_lflag |= ECHO;
3425         else tty.c_lflag &= ~ECHO;
3426         tcsetattr(0, TCSANOW, &tty);
/* Poll for and act on a single interactive keypress.
 * Returns AVERROR_EXIT to request shutdown (signal received or 'q'),
 * 0 otherwise. Keys adjust log level, toggle dumps, send filter
 * commands, or change codec debug flags. */
3431 static int check_keyboard_interaction(int64_t cur_time)
3434     static int64_t last_time;
3435     if (received_nb_signals)
3436         return AVERROR_EXIT;
3437     /* read_key() returns 0 on EOF */
/* Rate-limit polling to every 100ms; never read keys when daemonized. */
3438     if(cur_time - last_time >= 100000 && !run_as_daemon){
3440         last_time = cur_time;
3444         return AVERROR_EXIT;
3445     if (key == '+') av_log_set_level(av_log_get_level()+10);
3446     if (key == '-') av_log_set_level(av_log_get_level()-10);
3447     if (key == 's') qp_hist     ^= 1;
/* 'h' presumably cycles dump states: off -> pkt_dump -> pkt+hex -> off.
 * NOTE(review): surrounding branches elided here — confirm against full source. */
3450             do_hex_dump = do_pkt_dump = 0;
3451         } else if(do_pkt_dump){
3455         av_log_set_level(AV_LOG_DEBUG);
/* 'c': send a command to the first matching filter; 'C': queue/send to all. */
3457     if (key == 'c' || key == 'C'){
3458         char buf[4096], target[64], command[256], arg[256] = {0};
3461         fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
/* Read one line of input, bounded by buf size. */
3464         while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3469         fprintf(stderr, "\n");
/* Expect at least target, time and command; arg is optional. */
3471             (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3472             av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3473                    target, time, command, arg);
3474             for (i = 0; i < nb_filtergraphs; i++) {
3475                 FilterGraph *fg = filtergraphs[i];
3478                         ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3479                                                           key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3480                         fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3481                     } else if (key == 'c') {
3482                         fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3483                         ret = AVERROR_PATCHWELCOME;
/* Timed command: queued for execution at the given stream time. */
3485                         ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3487                             fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3492             av_log(NULL, AV_LOG_ERROR,
3493                    "Parse error, at least 3 arguments were expected, "
3494                    "only %d given in string '%s'\n", n, buf);
/* 'd' cycles to the next debug mode; 'D' prompts for an explicit value. */
3497     if (key == 'd' || key == 'D'){
3500             debug = input_streams[0]->st->codec->debug<<1;
3501             if(!debug) debug = 1;
/* Skip visualization modes that are unsupported and would crash. */
3502             while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3509             while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3514             fprintf(stderr, "\n");
3515             if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3516                 fprintf(stderr,"error parsing debug value\n");
/* Apply the chosen debug flags to every decoder and encoder context. */
3518         for(i=0;i<nb_input_streams;i++) {
3519             input_streams[i]->st->codec->debug = debug;
3521         for(i=0;i<nb_output_streams;i++) {
3522             OutputStream *ost = output_streams[i];
3523             ost->enc_ctx->debug = debug;
3525         if(debug) av_log_set_level(AV_LOG_DEBUG);
3526         fprintf(stderr,"debug=%d\n", debug);
/* '?' (or unknown key, presumably) prints this help text. */
3529         fprintf(stderr, "key    function\n"
3530                         "?      show this help\n"
3531                         "+      increase verbosity\n"
3532                         "-      decrease verbosity\n"
3533                         "c      Send command to first matching filter supporting it\n"
3534                         "C      Send/Que command to all matching filters\n"
3535                         "D      cycle through available debug modes\n"
3536                         "h      dump packets/hex press to cycle through the 3 states\n"
3538                         "s      Show QP histogram\n"
/* Per-input-file demuxer thread: reads packets with av_read_frame() and
 * forwards them to the main thread through f->in_thread_queue.
 * On fatal read error or EOF the error is propagated via
 * av_thread_message_queue_set_err_recv() and the thread exits. */
3545 static void *input_thread(void *arg)
3548     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3553         ret = av_read_frame(f->ctx, &pkt);
/* EAGAIN from the demuxer is transient (e.g. non-blocking device) — retry. */
3555         if (ret == AVERROR(EAGAIN)) {
3560             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3563         ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Non-blocking send failed: fall back to a blocking send and warn that the
 * queue is too small (user can raise -thread_queue_size). */
3564         if (flags && ret == AVERROR(EAGAIN)) {
3566             ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3567             av_log(f->ctx, AV_LOG_WARNING,
3568                    "Thread message queue blocking; consider raising the "
3569                    "thread_queue_size option (current value: %d)\n",
3570                    f->thread_queue_size);
3573             if (ret != AVERROR_EOF)
3574                 av_log(f->ctx, AV_LOG_ERROR,
3575                        "Unable to send packet to main thread: %s\n",
/* Packet was not consumed by the queue — release our reference. */
3577             av_packet_unref(&pkt);
3578             av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
/* Shut down and reap all input demuxer threads: signal EOF on each queue,
 * drain any queued packets (unref'ing them), join the thread, then free
 * the queue. Safe to call when threads were never started. */
3586 static void free_input_threads(void)
3590     for (i = 0; i < nb_input_files; i++) {
3591         InputFile *f = input_files[i];
/* Skip files without a queue (threading not used for this file). */
3594         if (!f || !f->in_thread_queue)
3596         av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
/* Drain so the producer thread unblocks and no packets leak. */
3597         while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3598             av_packet_unref(&pkt);
3600         pthread_join(f->thread, NULL);
3602         av_thread_message_queue_free(&f->in_thread_queue);
/* Start one demuxer thread per input file (only when there is more than
 * one input; a single input is read directly on the main thread).
 * Returns 0 on success or a negative AVERROR code. */
3606 static int init_input_threads(void)
3610     if (nb_input_files == 1)
3613     for (i = 0; i < nb_input_files; i++) {
3614         InputFile *f = input_files[i];
/* Use non-blocking queue sends for non-seekable (live) inputs, except the
 * lavfi pseudo-device, so a slow consumer does not stall a live source. */
3616         if (f->ctx->pb ? !f->ctx->pb->seekable :
3617             strcmp(f->ctx->iformat->name, "lavfi"))
3618             f->non_blocking = 1;
3619         ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3620                                             f->thread_queue_size, sizeof(AVPacket));
3624         if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3625             av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3626             av_thread_message_queue_free(&f->in_thread_queue);
/* pthread_create returns a positive errno value, not an AVERROR. */
3627             return AVERROR(ret);
/* Receive one packet from the input file's demuxer-thread queue.
 * Uses a non-blocking receive for non-blocking (live) inputs so the main
 * loop can service other files; otherwise blocks until a packet arrives. */
3633 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3635     return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3637                                         AV_THREAD_MESSAGE_NONBLOCK : 0);
/* Fetch the next packet for an input file.
 * With -re style rate emulation, returns EAGAIN while any stream of the
 * file is ahead of wall-clock time. Uses the threaded queue when multiple
 * inputs exist, otherwise reads the demuxer directly. */
3641 static int get_input_packet(InputFile *f, AVPacket *pkt)
3645         for (i = 0; i < f->nb_streams; i++) {
3646             InputStream *ist = input_streams[f->ist_index + i];
/* Convert the stream's dts to microseconds and compare with elapsed
 * wall-clock time since the stream started. */
3647             int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3648             int64_t now = av_gettime_relative() - ist->start;
3650                 return AVERROR(EAGAIN);
3655     if (nb_input_files > 1)
3656         return get_input_packet_mt(f, pkt);
3658     return av_read_frame(f->ctx, pkt);
/* Return nonzero if any output stream is currently marked unavailable,
 * i.e. the last transcode step hit EAGAIN somewhere. */
3661 static int got_eagain(void)
3664     for (i = 0; i < nb_output_streams; i++)
3665         if (output_streams[i]->unavailable)
/* Clear all transient EAGAIN state: per-input-file eagain flags and
 * per-output-stream unavailable flags, before the next scheduling pass. */
3670 static void reset_eagain(void)
3673     for (i = 0; i < nb_input_files; i++)
3674         input_files[i]->eagain = 0;
3675     for (i = 0; i < nb_output_streams; i++)
3676         output_streams[i]->unavailable = 0;
3679 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares tmp (in tmp_time_base) against *duration (in time_base) using
 * av_compare_ts(); keeps the larger of the two in *duration and returns
 * the time base that *duration is now expressed in. Used by seek_to_start()
 * to track the longest stream duration for -stream_loop. */
3680 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3681                                AVRational time_base)
3687         return tmp_time_base;
3690     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3693         return tmp_time_base;
/* Rewind an input file to its start for -stream_loop: seek to start_time,
 * flush decoders, and compute the file's total duration (used to offset
 * timestamps of the next loop iteration).
 * Returns 0 or a negative AVERROR from the seek. */
3699 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3702     AVCodecContext *avctx;
3703     int i, ret, has_audio = 0;
3704     int64_t duration = 0;
3706     ret = av_seek_frame(is, -1, is->start_time, 0);
/* First pass: flush all active decoders and detect whether any audio
 * stream produced samples. */
3710     for (i = 0; i < ifile->nb_streams; i++) {
3711         ist = input_streams[ifile->ist_index + i];
3712         avctx = ist->dec_ctx;
3715         if (ist->decoding_needed) {
3716             process_input_packet(ist, NULL, 1);
3717             avcodec_flush_buffers(avctx);
3720     /* duration is the length of the last frame in a stream
3721      * when audio stream is present we don't care about
3722      * last video frame length because it's not defined exactly */
3723         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
/* Second pass: estimate each stream's total duration = span of seen pts
 * plus the length of its last frame, and keep the maximum. */
3727     for (i = 0; i < ifile->nb_streams; i++) {
3728         ist = input_streams[ifile->ist_index + i];
3729         avctx = ist->dec_ctx;
/* Audio: last-frame length derived from the decoded sample count. */
3732         if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3733             AVRational sample_rate = {1, avctx->sample_rate};
3735             duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
/* Video (or no audio info): one frame duration from the forced or average
 * frame rate, falling back to 1 time-base unit. */
3739             if (ist->framerate.num) {
3740                 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3741             } else if (ist->st->avg_frame_rate.num) {
3742                 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3743             } else duration = 1;
3745         if (!ifile->duration)
3746             ifile->time_base = ist->st->time_base;
3747         /* the total duration of the stream, max_pts - min_pts is
3748          * the duration of the stream without the last frame */
3749         duration += ist->max_pts - ist->min_pts;
3750         ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
/* Positive loop counts are decremented once per completed iteration;
 * negative means loop forever. */
3754     if (ifile->loop > 0)
3762  * - 0 -- one packet was read and processed
3763  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3764  *   this function should be called again
3765  * - AVERROR_EOF -- this function should not be called again
/* Read one packet from the given input file, fix up its timestamps
 * (wrap correction, ts_offset, ts_scale, discontinuity handling, loop
 * offset), attach global side data to the first packet, and hand it to
 * process_input_packet(). */
3767 static int process_input(int file_index)
3769     InputFile *ifile = input_files[file_index];
3770     AVFormatContext *is;
3778     ret = get_input_packet(ifile, &pkt);
3780     if (ret == AVERROR(EAGAIN)) {
/* On EOF with -stream_loop active: rewind and try reading again. */
3784     if (ret < 0 && ifile->loop) {
3785         if ((ret = seek_to_start(ifile, is)) < 0)
3787             ret = get_input_packet(ifile, &pkt);
3790         if (ret != AVERROR_EOF) {
3791             print_error(is->filename, ret);
/* Real EOF: flush every decoder of this file and finish all output
 * streams fed directly (not through lavfi) from it. */
3796         for (i = 0; i < ifile->nb_streams; i++) {
3797             ist = input_streams[ifile->ist_index + i];
3798             if (ist->decoding_needed) {
3799                 ret = process_input_packet(ist, NULL, 0);
3804             /* mark all outputs that don't go through lavfi as finished */
3805             for (j = 0; j < nb_output_streams; j++) {
3806                 OutputStream *ost = output_streams[j];
3808                 if (ost->source_index == ifile->ist_index + i &&
3809                     (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3810                     finish_output_stream(ost);
/* EAGAIN (not EOF) is returned so the caller retries other files first. */
3814         ifile->eof_reached = 1;
3815         return AVERROR(EAGAIN);
3821         av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3822                          is->streams[pkt.stream_index]);
3824     /* the following test is needed in case new streams appear
3825        dynamically in stream : we ignore them */
3826     if (pkt.stream_index >= ifile->nb_streams) {
3827         report_new_stream(file_index, &pkt);
3828         goto discard_packet;
3831     ist = input_streams[ifile->ist_index + pkt.stream_index];
3833     ist->data_size += pkt.size;
3837         goto discard_packet;
3839     if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3840         av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3845         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3846                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3847                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3848                av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3849                av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3850                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3851                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3852                av_ts2str(input_files[ist->file_index]->ts_offset),
3853                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Timestamp wrap correction for streams with < 64-bit pts (e.g. MPEG-TS
 * 33-bit timestamps). */
3856     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3857         int64_t stime, stime2;
3858         // Correcting starttime based on the enabled streams
3859         // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3860         //       so we instead do it here as part of discontinuity handling
3861         if (   ist->next_dts == AV_NOPTS_VALUE
3862             && ifile->ts_offset == -is->start_time
3863             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3864             int64_t new_start_time = INT64_MAX;
3865             for (i=0; i<is->nb_streams; i++) {
3866                 AVStream *st = is->streams[i];
3867                 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3869                 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3871             if (new_start_time > is->start_time) {
3872                 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3873                 ifile->ts_offset = -new_start_time;
/* stime2 is the wrap point; timestamps beyond half the wrap range are
 * assumed to be pre-wrap values and are shifted down. */
3877         stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3878         stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3879         ist->wrap_correction_done = 1;
3881         if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3882             pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3883             ist->wrap_correction_done = 0;
3885         if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3886             pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3887             ist->wrap_correction_done = 0;
3891     /* add the stream-global side data to the first packet */
3892     if (ist->nb_packets == 1) {
3893         if (ist->st->nb_side_data)
3894             av_packet_split_side_data(&pkt);
3895         for (i = 0; i < ist->st->nb_side_data; i++) {
3896             AVPacketSideData *src_sd = &ist->st->side_data[i];
/* Do not overwrite side data the packet already carries, and skip the
 * display matrix when autorotation handles it instead. */
3899             if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3901             if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3904             dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3908             memcpy(dst_data, src_sd->data, src_sd->size);
/* Apply the accumulated file timestamp offset, then the -itsscale factor. */
3912     if (pkt.dts != AV_NOPTS_VALUE)
3913         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3914     if (pkt.pts != AV_NOPTS_VALUE)
3915         pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3917     if (pkt.pts != AV_NOPTS_VALUE)
3918         pkt.pts *= ist->ts_scale;
3919     if (pkt.dts != AV_NOPTS_VALUE)
3920         pkt.dts *= ist->ts_scale;
/* Inter-stream discontinuity check: first dts of this stream vs. the last
 * dts seen on any stream of the file (AVFMT_TS_DISCONT formats only). */
3922     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3923     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3924          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3925         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3926         && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3927         int64_t delta   = pkt_dts - ifile->last_ts;
3928         if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3929             delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
3930             ifile->ts_offset -= delta;
3931             av_log(NULL, AV_LOG_DEBUG,
3932                    "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3933                    delta, ifile->ts_offset);
3934             pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3935             if (pkt.pts != AV_NOPTS_VALUE)
3936                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
/* Offset timestamps by the accumulated duration of previous -stream_loop
 * iterations, and track the pts range for the next duration estimate. */
3940     duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3941     if (pkt.pts != AV_NOPTS_VALUE) {
3942         pkt.pts += duration;
3943         ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3944         ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3947     if (pkt.dts != AV_NOPTS_VALUE)
3948         pkt.dts += duration;
/* Intra-stream discontinuity / sanity checks against the predicted
 * next_dts: large jumps adjust ts_offset (DISCONT formats) or drop the
 * invalid timestamps (others, beyond dts_error_threshold). */
3950     pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3951     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3952          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3953         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3955         int64_t delta   = pkt_dts - ist->next_dts;
3956         if (is->iformat->flags & AVFMT_TS_DISCONT) {
3957             if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3958                 delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
3959                 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3960                 ifile->ts_offset -= delta;
3961                 av_log(NULL, AV_LOG_DEBUG,
3962                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3963                        delta, ifile->ts_offset);
3964                 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3965                 if (pkt.pts != AV_NOPTS_VALUE)
3966                     pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3969             if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3970                  delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3971                 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3972                 pkt.dts = AV_NOPTS_VALUE;
3974             if (pkt.pts != AV_NOPTS_VALUE){
3975                 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3976                 delta   = pkt_pts - ist->next_dts;
3977                 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3978                      delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
3979                     av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3980                     pkt.pts = AV_NOPTS_VALUE;
/* Remember the last dts of this file for the inter-stream check above. */
3986     if (pkt.dts != AV_NOPTS_VALUE)
3987         ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3990         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3991                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3992                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3993                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3994                av_ts2str(input_files[ist->file_index]->ts_offset),
3995                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
/* Keep sub2video subtitle overlays advancing, then decode/copy the packet. */
3998     sub2video_heartbeat(ist, pkt.pts);
4000     process_input_packet(ist, &pkt, 0);
4003     av_packet_unref(&pkt);
4009  * Perform a step of transcoding for the specified filter graph.
 *
 * Requests a frame from the graph's oldest output. On EAGAIN, finds the
 * input filter with the most failed source requests and reports its
 * input stream via *best_ist so the caller can feed it more data.
 *
4011  * @param[in]  graph     filter graph to consider
4012  * @param[out] best_ist  input stream where a frame would allow to continue
4013  * @return  0 for success, <0 for error
4015 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4018     int nb_requests, nb_requests_max = 0;
4019     InputFilter *ifilter;
4023     ret = avfilter_graph_request_oldest(graph->graph);
/* Success: a frame is ready somewhere — drain the sinks. */
4025         return reap_filters(0);
/* EOF: flush the sinks and close all output streams of this graph. */
4027     if (ret == AVERROR_EOF) {
4028         ret = reap_filters(1);
4029         for (i = 0; i < graph->nb_outputs; i++)
4030             close_output_stream(graph->outputs[i]->ost);
4033     if (ret != AVERROR(EAGAIN))
/* EAGAIN: pick the starving input (most failed buffersrc requests) whose
 * file can still deliver packets. */
4036     for (i = 0; i < graph->nb_inputs; i++) {
4037         ifilter = graph->inputs[i];
4039         if (input_files[ist->file_index]->eagain ||
4040             input_files[ist->file_index]->eof_reached)
4042         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4043         if (nb_requests > nb_requests_max) {
4044             nb_requests_max = nb_requests;
/* No feedable input found: mark this graph's outputs unavailable so the
 * scheduler moves on. */
4050         for (i = 0; i < graph->nb_outputs; i++)
4051             graph->outputs[i]->ost->unavailable = 1;
4057  * Run a single step of transcoding.
 *
 * Chooses the neediest output stream, determines which input stream must
 * be fed (directly or via its filtergraph), reads/processes one packet
 * from that input, then drains any completed filter output.
 *
4059  * @return  0 for success, <0 for error
4061 static int transcode_step(void)
4067     ost = choose_output();
/* No selectable output: either everything is EAGAIN (wait and retry) or
 * all inputs are exhausted (finish). */
4074             av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
/* Filter-fed output: ask the graph which input stream it is starving on. */
4079         if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
/* Directly-fed output: the source input stream is fixed. */
4084         av_assert0(ost->source_index >= 0);
4085         ist = input_streams[ost->source_index];
4088     ret = process_input(ist->file_index);
4089     if (ret == AVERROR(EAGAIN)) {
4090         if (input_files[ist->file_index]->eagain)
4091             ost->unavailable = 1;
/* EOF from process_input is not an error at this level. */
4096         return ret == AVERROR_EOF ? 0 : ret;
4098     return reap_filters(0);
4102  * The following code is the main loop of the file converter
/* Drives the whole conversion: init, the step loop (with keyboard
 * interaction and progress reports), decoder flush, trailer writing,
 * final report, and teardown of encoders/decoders/per-stream state.
 * Returns 0 on success or a negative error code. */
4104 static int transcode(void)
4107     AVFormatContext *os;
4110     int64_t timer_start;
4111     int64_t total_packets_written = 0;
4113     ret = transcode_init();
4117     if (stdin_interaction) {
4118         av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4121     timer_start = av_gettime_relative();
4124     if ((ret = init_input_threads()) < 0)
/* Main loop: runs until a termination signal, no output is needed, or a
 * fatal error occurs. */
4128     while (!received_sigterm) {
4129         int64_t cur_time= av_gettime_relative();
4131         /* if 'q' pressed, exits */
4132         if (stdin_interaction)
4133             if (check_keyboard_interaction(cur_time) < 0)
4136         /* check if there's any stream where output is still needed */
4137         if (!need_output()) {
4138             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4142         ret = transcode_step();
4143         if (ret < 0 && ret != AVERROR_EOF) {
4145             av_strerror(ret, errbuf, sizeof(errbuf));
4147             av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4151         /* dump report by using the output first video and audio streams */
4152         print_report(0, timer_start, cur_time);
4155     free_input_threads();
4158     /* at the end of stream, we must flush the decoder buffers */
4159     for (i = 0; i < nb_input_streams; i++) {
4160         ist = input_streams[i];
4161         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4162             process_input_packet(ist, NULL, 0);
4169     /* write the trailer if needed and close file */
4170     for (i = 0; i < nb_output_files; i++) {
4171         os = output_files[i]->ctx;
4172         if ((ret = av_write_trailer(os)) < 0) {
4173             av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4179     /* dump report by using the first video and audio streams */
4180     print_report(1, timer_start, av_gettime_relative());
4182     /* close each encoder */
4183     for (i = 0; i < nb_output_streams; i++) {
4184         ost = output_streams[i];
4185         if (ost->encoding_needed) {
4186             av_freep(&ost->enc_ctx->stats_in);
4188         total_packets_written += ost->packets_written;
/* -abort_on empty_output: fail hard if nothing was ever muxed. */
4191     if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4192         av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4196     /* close each decoder */
4197     for (i = 0; i < nb_input_streams; i++) {
4198         ist = input_streams[i];
4199         if (ist->decoding_needed) {
4200             avcodec_close(ist->dec_ctx);
4201             if (ist->hwaccel_uninit)
4202                 ist->hwaccel_uninit(ist->dec_ctx);
/* Cleanup path (also reached on error, presumably via a label elided
 * here): release per-output-stream resources. */
4211     free_input_threads();
4214     if (output_streams) {
4215         for (i = 0; i < nb_output_streams; i++) {
4216             ost = output_streams[i];
/* fclose can fail on buffered writes — report but keep cleaning up. */
4219                 if (fclose(ost->logfile))
4220                     av_log(NULL, AV_LOG_ERROR,
4221                            "Error closing logfile, loss of information possible: %s\n",
4222                            av_err2str(AVERROR(errno)));
4223                 ost->logfile = NULL;
4225             av_freep(&ost->forced_kf_pts);
4226             av_freep(&ost->apad);
4227             av_freep(&ost->disposition);
4228             av_dict_free(&ost->encoder_opts);
4229             av_dict_free(&ost->sws_dict);
4230             av_dict_free(&ost->swr_opts);
4231             av_dict_free(&ost->resample_opts);
/* Return the process's consumed user CPU time in microseconds, for the
 * -benchmark report. Uses getrusage() on POSIX, GetProcessTimes() on
 * Windows, and falls back to wall-clock time elsewhere. */
4239 static int64_t getutime(void)
4242     struct rusage rusage;
4244     getrusage(RUSAGE_SELF, &rusage);
4245     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4246 #elif HAVE_GETPROCESSTIMES
4248     FILETIME c, e, k, u;
4249     proc = GetCurrentProcess();
4250     GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
4251     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4253     return av_gettime_relative();
/* Return the process's peak memory usage in bytes for the -benchmark
 * report: ru_maxrss on POSIX (kilobytes, hence the *1024),
 * PeakPagefileUsage on Windows; 0 where neither API exists (presumably —
 * the fallback branch is outside this view). */
4257 static int64_t getmaxrss(void)
4259 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4260     struct rusage rusage;
4261     getrusage(RUSAGE_SELF, &rusage);
4262     return (int64_t)rusage.ru_maxrss * 1024;
4263 #elif HAVE_GETPROCESSMEMORYINFO
4265     PROCESS_MEMORY_COUNTERS memcounters;
4266     proc = GetCurrentProcess();
4267     memcounters.cb = sizeof(memcounters);
4268     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4269     return memcounters.PeakPagefileUsage;
4275 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
/* Program entry point: registers cleanup, parses the command line, opens
 * all inputs/outputs, runs transcode(), then reports benchmark stats and
 * exits via exit_program(). */
4279 int main(int argc, char **argv)
4284     register_exit(ffmpeg_cleanup);
4286     setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4288     av_log_set_flags(AV_LOG_SKIP_REPEATED);
4289     parse_loglevel(argc, argv, options);
/* "-d" as the first argument switches to daemon mode: silent logging. */
4291     if(argc>1 && !strcmp(argv[1], "-d")){
4293         av_log_set_callback(log_callback_null);
/* Register all codecs, devices, filters and (de)muxers before parsing. */
4298     avcodec_register_all();
4300     avdevice_register_all();
4302     avfilter_register_all();
4304     avformat_network_init();
4306     show_banner(argc, argv, options);
4310     /* parse options and open all input/output files */
4311     ret = ffmpeg_parse_options(argc, argv);
4315     if (nb_output_files <= 0 && nb_input_files == 0) {
4317         av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4321     /* file converter / grab */
4322     if (nb_output_files <= 0) {
4323         av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4327 //     if (nb_input_files == 0) {
4328 //         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
/* Time the conversion for the -benchmark report. */
4332     current_time = ti = getutime();
4333     if (transcode() < 0)
4335     ti = getutime() - ti;
4337         av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4339     av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4340            decode_error_stat[0], decode_error_stat[1]);
/* Fail if the observed decode-error rate exceeds -max_error_rate. */
4341     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4344     exit_program(received_nb_signals ? 255 : main_return_code);
4345     return main_return_code;